Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig | 41
-rw-r--r--  drivers/crypto/Makefile | 1
-rw-r--r--  drivers/crypto/amcc/Makefile | 2
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c | 508
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 908
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h | 203
-rw-r--r--  drivers/crypto/amcc/crypto4xx_reg_def.h | 7
-rw-r--r--  drivers/crypto/amcc/crypto4xx_sa.c | 85
-rw-r--r--  drivers/crypto/amcc/crypto4xx_sa.h | 99
-rw-r--r--  drivers/crypto/amcc/crypto4xx_trng.c | 2
-rw-r--r--  drivers/crypto/atmel-aes.c | 80
-rw-r--r--  drivers/crypto/atmel-sha.c | 5
-rw-r--r--  drivers/crypto/atmel-tdes.c | 23
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c | 14
-rw-r--r--  drivers/crypto/bcm/cipher.c | 117
-rw-r--r--  drivers/crypto/bcm/cipher.h | 3
-rw-r--r--  drivers/crypto/bcm/util.c | 14
-rw-r--r--  drivers/crypto/bfin_crc.c | 3
-rw-r--r--  drivers/crypto/caam/caamalg.c | 130
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c | 182
-rw-r--r--  drivers/crypto/caam/caamalg_desc.h | 10
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 75
-rw-r--r--  drivers/crypto/caam/caamhash.c | 85
-rw-r--r--  drivers/crypto/caam/compat.h | 1
-rw-r--r--  drivers/crypto/caam/ctrl.c | 12
-rw-r--r--  drivers/crypto/caam/desc.h | 31
-rw-r--r--  drivers/crypto/caam/desc_constr.h | 51
-rw-r--r--  drivers/crypto/caam/intern.h | 1
-rw-r--r--  drivers/crypto/caam/jr.c | 4
-rw-r--r--  drivers/crypto/caam/key_gen.c | 30
-rw-r--r--  drivers/crypto/caam/key_gen.h | 30
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_reqmanager.c | 3
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_hal.c | 2
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | 1
-rw-r--r--  drivers/crypto/ccp/Kconfig | 12
-rw-r--r--  drivers/crypto/ccp/Makefile | 1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-galois.c | 10
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-main.c | 8
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v5.c | 3
-rw-r--r--  drivers/crypto/ccp/ccp-dev.c | 7
-rw-r--r--  drivers/crypto/ccp/ccp-dmaengine.c | 5
-rw-r--r--  drivers/crypto/ccp/psp-dev.c | 805
-rw-r--r--  drivers/crypto/ccp/psp-dev.h | 83
-rw-r--r--  drivers/crypto/ccp/sp-dev.c | 35
-rw-r--r--  drivers/crypto/ccp/sp-dev.h | 28
-rw-r--r--  drivers/crypto/ccp/sp-pci.c | 52
-rw-r--r--  drivers/crypto/chelsio/Kconfig | 11
-rw-r--r--  drivers/crypto/chelsio/Makefile | 1
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 2126
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.h | 64
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c | 24
-rw-r--r--  drivers/crypto/chelsio/chcr_core.h | 40
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h | 129
-rw-r--r--  drivers/crypto/chelsio/chcr_ipsec.c | 654
-rw-r--r--  drivers/crypto/exynos-rng.c | 108
-rw-r--r--  drivers/crypto/hifn_795x.c | 1
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 371
-rw-r--r--  drivers/crypto/inside-secure/safexcel.h | 173
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 136
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c | 220
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 8
-rw-r--r--  drivers/crypto/marvell/cesa.c | 49
-rw-r--r--  drivers/crypto/marvell/cesa.h | 27
-rw-r--r--  drivers/crypto/marvell/cipher.c | 476
-rw-r--r--  drivers/crypto/marvell/tdma.c | 5
-rw-r--r--  drivers/crypto/mediatek/mtk-aes.c | 39
-rw-r--r--  drivers/crypto/mv_cesa.c | 1216
-rw-r--r--  drivers/crypto/mv_cesa.h | 151
-rw-r--r--  drivers/crypto/n2_core.c | 15
-rw-r--r--  drivers/crypto/nx/nx-842-powernv.c | 169
-rw-r--r--  drivers/crypto/nx/nx-842-pseries.c | 2
-rw-r--r--  drivers/crypto/nx/nx-842.c | 2
-rw-r--r--  drivers/crypto/nx/nx-aes-gcm.c | 9
-rw-r--r--  drivers/crypto/nx/nx.c | 2
-rw-r--r--  drivers/crypto/omap-aes-gcm.c | 11
-rw-r--r--  drivers/crypto/omap-aes.c | 12
-rw-r--r--  drivers/crypto/omap-des.c | 7
-rw-r--r--  drivers/crypto/omap-sham.c | 7
-rw-r--r--  drivers/crypto/padlock-aes.c | 4
-rw-r--r--  drivers/crypto/padlock-sha.c | 2
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 34
-rw-r--r--  drivers/crypto/qat/qat_common/adf_dev_mgr.c | 3
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c | 18
-rw-r--r--  drivers/crypto/qat/qat_common/qat_hal.c | 133
-rw-r--r--  drivers/crypto/qat/qat_common/qat_uclo.c | 15
-rw-r--r--  drivers/crypto/qce/ablkcipher.c | 5
-rw-r--r--  drivers/crypto/qce/sha.c | 30
-rw-r--r--  drivers/crypto/s5p-sss.c | 1626
-rw-r--r--  drivers/crypto/stm32/Kconfig | 13
-rw-r--r--  drivers/crypto/stm32/Makefile | 5
-rw-r--r--  drivers/crypto/stm32/stm32-cryp.c | 1170
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c | 20
-rw-r--r--  drivers/crypto/stm32/stm32_crc32.c | 2
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-prng.c | 6
-rw-r--r--  drivers/crypto/talitos.c | 586
-rw-r--r--  drivers/crypto/talitos.h | 7
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 1
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_algs.c | 2
-rw-r--r--  drivers/crypto/vmx/aes_ctr.c | 33
99 files changed, 9146 insertions, 4646 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index fe33c199fc1a..4b741b83e23f 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -199,22 +199,8 @@ config CRYPTO_CRC32_S390
It is available with IBM z13 or later.
-config CRYPTO_DEV_MV_CESA
- tristate "Marvell's Cryptographic Engine"
- depends on PLAT_ORION
- select CRYPTO_AES
- select CRYPTO_BLKCIPHER
- select CRYPTO_HASH
- select SRAM
- help
- This driver allows you to utilize the Cryptographic Engines and
- Security Accelerator (CESA) which can be found on the Marvell Orion
- and Kirkwood SoCs, such as QNAP's TS-209.
-
- Currently the driver supports AES in ECB and CBC mode without DMA.
-
config CRYPTO_DEV_MARVELL_CESA
- tristate "New Marvell's Cryptographic Engine driver"
+ tristate "Marvell's Cryptographic Engine driver"
depends on PLAT_ORION || ARCH_MVEBU
select CRYPTO_AES
select CRYPTO_DES
@@ -223,12 +209,10 @@ config CRYPTO_DEV_MARVELL_CESA
select SRAM
help
This driver allows you to utilize the Cryptographic Engines and
- Security Accelerator (CESA) which can be found on the Armada 370.
+ Security Accelerator (CESA) which can be found on MVEBU and ORION
+ platforms.
This driver supports CPU offload through DMA transfers.
- This driver is aimed at replacing the mv_cesa driver. This will only
- happen once it has received proper testing.
-
config CRYPTO_DEV_NIAGARA2
tristate "Niagara2 Stream Processing Unit driver"
select CRYPTO_DES
@@ -315,6 +299,10 @@ config CRYPTO_DEV_PPC4XX
tristate "Driver AMCC PPC4xx crypto accelerator"
depends on PPC && 4xx
select CRYPTO_HASH
+ select CRYPTO_AEAD
+ select CRYPTO_AES
+ select CRYPTO_CCM
+ select CRYPTO_GCM
select CRYPTO_BLKCIPHER
help
This option allows you to have support for AMCC crypto acceleration.
@@ -439,6 +427,20 @@ config CRYPTO_DEV_S5P
Select this to offload Samsung S5PV210 or S5PC110, Exynos from AES
algorithms execution.
+config CRYPTO_DEV_EXYNOS_HASH
+ bool "Support for Samsung Exynos HASH accelerator"
+ depends on CRYPTO_DEV_S5P
+ depends on !CRYPTO_DEV_EXYNOS_RNG && CRYPTO_DEV_EXYNOS_RNG!=m
+ select CRYPTO_SHA1
+ select CRYPTO_MD5
+ select CRYPTO_SHA256
+ help
+ Select this to offload Exynos from HASH MD5/SHA1/SHA256.
+ This will select software SHA1, MD5 and SHA256 as they are
+ needed for small and zero-size messages.
+ HASH algorithms will be disabled if EXYNOS_RNG
+ is enabled due to hw conflict.
+
config CRYPTO_DEV_NX
bool "Support for IBM PowerPC Nest (NX) cryptographic acceleration"
depends on PPC64
@@ -721,7 +723,6 @@ config CRYPTO_DEV_ARTPEC6
select CRYPTO_HASH
select CRYPTO_SHA1
select CRYPTO_SHA256
- select CRYPTO_SHA384
select CRYPTO_SHA512
help
Enables the driver for the on-chip crypto accelerator
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c00708d04be6..2513d13ea2c4 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
-obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
diff --git a/drivers/crypto/amcc/Makefile b/drivers/crypto/amcc/Makefile
index b95539928fdf..e33c185fc163 100644
--- a/drivers/crypto/amcc/Makefile
+++ b/drivers/crypto/amcc/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
-crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
+crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o
crypto4xx-$(CONFIG_HW_RANDOM_PPC4XX) += crypto4xx_trng.o
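
With crypto4xx_sa.o gone from the build, key material is written into the security association through accessors, as the crypto4xx_alg.c changes below show. A minimal sketch of that pattern, assuming the SA structures declared in crypto4xx_sa.h (names taken from the diff, surrounding setkey plumbing omitted):

	/* Sketch only: load an AES key into an already-allocated inbound SA. */
	struct dynamic_sa_ctl *sa = ctx->sa_in;

	/* The accessor points at the key field inside the SA; the copy helper
	 * converts the key bytes to the little-endian words the engine expects. */
	crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa), key, keylen);
	sa->sa_contents.w = SA_AES_CONTENTS | (keylen << 2);
	sa->sa_command_1.bf.key_len = keylen >> 3;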
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 4afca3968773..ea83d0bff0e9 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -26,11 +26,14 @@
#include <crypto/internal/hash.h>
#include <linux/dma-mapping.h>
#include <crypto/algapi.h>
+#include <crypto/aead.h>
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include <crypto/sha.h>
+#include <crypto/ctr.h>
#include "crypto4xx_reg_def.h"
-#include "crypto4xx_sa.h"
#include "crypto4xx_core.h"
+#include "crypto4xx_sa.h"
static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
u32 save_iv, u32 ld_h, u32 ld_iv,
@@ -62,6 +65,7 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
sa->sa_command_1.bf.crypto_mode9_8 = cm & 3;
sa->sa_command_1.bf.feedback_mode = cfb,
sa->sa_command_1.bf.sa_rev = 1;
+ sa->sa_command_1.bf.hmac_muting = hmac_mc;
sa->sa_command_1.bf.extended_seq_num = esn;
sa->sa_command_1.bf.seq_num_mask = sn_mask;
sa->sa_command_1.bf.mutable_bit_proc = mute;
@@ -73,29 +77,29 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
int crypto4xx_encrypt(struct ablkcipher_request *req)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ unsigned int ivlen = crypto_ablkcipher_ivsize(
+ crypto_ablkcipher_reqtfm(req));
+ __le32 iv[ivlen];
- ctx->direction = DIR_OUTBOUND;
- ctx->hash_final = 0;
- ctx->is_hash = 0;
- ctx->pd_ctl = 0x1;
+ if (ivlen)
+ crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, req->info,
- get_dynamic_sa_iv_size(ctx));
+ req->nbytes, iv, ivlen, ctx->sa_out, ctx->sa_len, 0);
}
int crypto4xx_decrypt(struct ablkcipher_request *req)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ unsigned int ivlen = crypto_ablkcipher_ivsize(
+ crypto_ablkcipher_reqtfm(req));
+ __le32 iv[ivlen];
- ctx->direction = DIR_INBOUND;
- ctx->hash_final = 0;
- ctx->is_hash = 0;
- ctx->pd_ctl = 1;
+ if (ivlen)
+ crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, req->info,
- get_dynamic_sa_iv_size(ctx));
+ req->nbytes, iv, ivlen, ctx->sa_in, ctx->sa_len, 0);
}
/**
@@ -120,23 +124,15 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
}
/* Create SA */
- if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ if (ctx->sa_in || ctx->sa_out)
crypto4xx_free_sa(ctx);
rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4);
if (rc)
return rc;
- if (ctx->state_record_dma_addr == 0) {
- rc = crypto4xx_alloc_state_record(ctx);
- if (rc) {
- crypto4xx_free_sa(ctx);
- return rc;
- }
- }
/* Setup SA */
- sa = (struct dynamic_sa_ctl *) ctx->sa_in;
- ctx->hash_final = 0;
+ sa = ctx->sa_in;
set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
@@ -150,18 +146,13 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
SA_SEQ_MASK_OFF, SA_MC_ENABLE,
SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
SA_NOT_COPY_HDR);
- crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
- key, keylen);
- sa->sa_contents = SA_AES_CONTENTS | (keylen << 2);
+ crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
+ key, keylen);
+ sa->sa_contents.w = SA_AES_CONTENTS | (keylen << 2);
sa->sa_command_1.bf.key_len = keylen >> 3;
- ctx->is_hash = 0;
- ctx->direction = DIR_INBOUND;
- memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
- (void *)&ctx->state_record_dma_addr, 4);
- ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
- sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa = ctx->sa_out;
sa->sa_command_0.bf.dir = DIR_OUTBOUND;
return 0;
@@ -174,6 +165,392 @@ int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
CRYPTO_FEEDBACK_MODE_NO_FB);
}
+int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB,
+ CRYPTO_FEEDBACK_MODE_128BIT_CFB);
+}
+
+int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_ECB,
+ CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB,
+ CRYPTO_FEEDBACK_MODE_64BIT_OFB);
+}
+
+int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ int rc;
+
+ rc = crypto4xx_setkey_aes(cipher, key, keylen - CTR_RFC3686_NONCE_SIZE,
+ CRYPTO_MODE_CTR, CRYPTO_FEEDBACK_MODE_NO_FB);
+ if (rc)
+ return rc;
+
+ ctx->iv_nonce = cpu_to_le32p((u32 *)&key[keylen -
+ CTR_RFC3686_NONCE_SIZE]);
+
+ return 0;
+}
+
+int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ __le32 iv[AES_IV_SIZE / 4] = {
+ ctx->iv_nonce,
+ cpu_to_le32p((u32 *) req->info),
+ cpu_to_le32p((u32 *) (req->info + 4)),
+ cpu_to_le32(1) };
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, iv, AES_IV_SIZE,
+ ctx->sa_out, ctx->sa_len, 0);
+}
+
+int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ __le32 iv[AES_IV_SIZE / 4] = {
+ ctx->iv_nonce,
+ cpu_to_le32p((u32 *) req->info),
+ cpu_to_le32p((u32 *) (req->info + 4)),
+ cpu_to_le32(1) };
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, iv, AES_IV_SIZE,
+ ctx->sa_out, ctx->sa_len, 0);
+}
+
+static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
+ bool is_ccm, bool decrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+ /* authsize has to be a multiple of 4 */
+ if (aead->authsize & 3)
+ return true;
+
+ /*
+ * hardware does not handle cases where cryptlen
+ * is less than a block
+ */
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return true;
+
+ /* assoc len needs to be a multiple of 4 */
+ if (req->assoclen & 0x3)
+ return true;
+
+ /* CCM supports only counter field length of 2 and 4 bytes */
+ if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3))
+ return true;
+
+ return false;
+}
+
+static int crypto4xx_aead_fallback(struct aead_request *req,
+ struct crypto4xx_ctx *ctx, bool do_decrypt)
+{
+ char aead_req_data[sizeof(struct aead_request) +
+ crypto_aead_reqsize(ctx->sw_cipher.aead)]
+ __aligned(__alignof__(struct aead_request));
+
+ struct aead_request *subreq = (void *) aead_req_data;
+
+ memset(subreq, 0, sizeof(aead_req_data));
+
+ aead_request_set_tfm(subreq, ctx->sw_cipher.aead);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+ return do_decrypt ? crypto_aead_decrypt(subreq) :
+ crypto_aead_encrypt(subreq);
+}
+
+static int crypto4xx_setup_fallback(struct crypto4xx_ctx *ctx,
+ struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ int rc;
+
+ crypto_aead_clear_flags(ctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(ctx->sw_cipher.aead,
+ crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
+ rc = crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
+ crypto_aead_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
+ crypto_aead_set_flags(cipher,
+ crypto_aead_get_flags(ctx->sw_cipher.aead) &
+ CRYPTO_TFM_RES_MASK);
+
+ return rc;
+}
+
+/**
+ * AES-CCM Functions
+ */
+
+int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct dynamic_sa_ctl *sa;
+ int rc = 0;
+
+ rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
+ if (rc)
+ return rc;
+
+ if (ctx->sa_in || ctx->sa_out)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_CCM_LEN + (keylen - 16) / 4);
+ if (rc)
+ return rc;
+
+ /* Setup SA */
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2);
+
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+ SA_CIPHER_ALG_AES,
+ SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+ SA_OPCODE_HASH_DECRYPT, DIR_INBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+
+ crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa), key, keylen);
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+ SA_CIPHER_ALG_AES,
+ SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+ SA_OPCODE_ENCRYPT_HASH, DIR_OUTBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+ return 0;
+}
+
+static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ unsigned int len = req->cryptlen;
+ __le32 iv[16];
+ u32 tmp_sa[ctx->sa_len * 4];
+ struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa;
+
+ if (crypto4xx_aead_need_fallback(req, true, decrypt))
+ return crypto4xx_aead_fallback(req, ctx, decrypt);
+
+ if (decrypt)
+ len -= crypto_aead_authsize(aead);
+
+ memcpy(tmp_sa, decrypt ? ctx->sa_in : ctx->sa_out, sizeof(tmp_sa));
+ sa->sa_command_0.bf.digest_len = crypto_aead_authsize(aead) >> 2;
+
+ if (req->iv[0] == 1) {
+ /* CRYPTO_MODE_AES_ICM */
+ sa->sa_command_1.bf.crypto_mode9_8 = 1;
+ }
+
+ iv[3] = cpu_to_le32(0);
+ crypto4xx_memcpy_to_le32(iv, req->iv, 16 - (req->iv[0] + 1));
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ len, iv, sizeof(iv),
+ sa, ctx->sa_len, req->assoclen);
+}
+
+int crypto4xx_encrypt_aes_ccm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_ccm(req, false);
+}
+
+int crypto4xx_decrypt_aes_ccm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_ccm(req, true);
+}
+
+int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
+ unsigned int authsize)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return crypto_aead_setauthsize(ctx->sw_cipher.aead, authsize);
+}
+
+/**
+ * AES-GCM Functions
+ */
+
+static int crypto4xx_aes_gcm_validate_keylen(unsigned int keylen)
+{
+ switch (keylen) {
+ case 16:
+ case 24:
+ case 32:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_cipher *aes_tfm = NULL;
+ uint8_t src[16] = { 0 };
+ int rc = 0;
+
+ aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(aes_tfm)) {
+ rc = PTR_ERR(aes_tfm);
+ pr_warn("could not load aes cipher driver: %d\n", rc);
+ return rc;
+ }
+
+ rc = crypto_cipher_setkey(aes_tfm, key, keylen);
+ if (rc) {
+ pr_err("setkey() failed: %d\n", rc);
+ goto out;
+ }
+
+ crypto_cipher_encrypt_one(aes_tfm, src, src);
+ crypto4xx_memcpy_to_le32(hash_start, src, 16);
+out:
+ crypto_free_cipher(aes_tfm);
+ return rc;
+}
+
+int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct dynamic_sa_ctl *sa;
+ int rc = 0;
+
+ if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) {
+ crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
+ if (rc)
+ return rc;
+
+ if (ctx->sa_in || ctx->sa_out)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_GCM_LEN + (keylen - 16) / 4);
+ if (rc)
+ return rc;
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+ sa->sa_contents.w = SA_AES_GCM_CONTENTS | (keylen << 2);
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_GHASH,
+ SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_BASIC, SA_OPCODE_HASH_DECRYPT,
+ DIR_INBOUND);
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_ON, SA_MC_DISABLE,
+ SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+
+ crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
+ key, keylen);
+
+ rc = crypto4xx_compute_gcm_hash_key_sw(get_dynamic_sa_inner_digest(sa),
+ key, keylen);
+ if (rc) {
+ pr_err("GCM hash key setting failed = %d\n", rc);
+ goto err;
+ }
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+ sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT_HASH;
+
+ return 0;
+err:
+ crypto4xx_free_sa(ctx);
+ return rc;
+}
+
+static inline int crypto4xx_crypt_aes_gcm(struct aead_request *req,
+ bool decrypt)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ unsigned int len = req->cryptlen;
+ __le32 iv[4];
+
+ if (crypto4xx_aead_need_fallback(req, false, decrypt))
+ return crypto4xx_aead_fallback(req, ctx, decrypt);
+
+ crypto4xx_memcpy_to_le32(iv, req->iv, GCM_AES_IV_SIZE);
+ iv[3] = cpu_to_le32(1);
+
+ if (decrypt)
+ len -= crypto_aead_authsize(crypto_aead_reqtfm(req));
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ len, iv, sizeof(iv),
+ decrypt ? ctx->sa_in : ctx->sa_out,
+ ctx->sa_len, req->assoclen);
+}
+
+int crypto4xx_encrypt_aes_gcm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_gcm(req, false);
+}
+
+int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_gcm(req, true);
+}
+
/**
* HASH SHA1 Functions
*/
@@ -183,53 +560,39 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
unsigned char hm)
{
struct crypto_alg *alg = tfm->__crt_alg;
- struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct crypto4xx_alg *my_alg;
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
- struct dynamic_sa_ctl *sa;
- struct dynamic_sa_hash160 *sa_in;
+ struct dynamic_sa_hash160 *sa;
int rc;
+ my_alg = container_of(__crypto_ahash_alg(alg), struct crypto4xx_alg,
+ alg.u.hash);
ctx->dev = my_alg->dev;
- ctx->is_hash = 1;
- ctx->hash_final = 0;
/* Create SA */
- if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ if (ctx->sa_in || ctx->sa_out)
crypto4xx_free_sa(ctx);
rc = crypto4xx_alloc_sa(ctx, sa_len);
if (rc)
return rc;
- if (ctx->state_record_dma_addr == 0) {
- crypto4xx_alloc_state_record(ctx);
- if (!ctx->state_record_dma_addr) {
- crypto4xx_free_sa(ctx);
- return -ENOMEM;
- }
- }
-
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct crypto4xx_ctx));
- sa = (struct dynamic_sa_ctl *) ctx->sa_in;
- set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ sa = (struct dynamic_sa_hash160 *)ctx->sa_in;
+ set_dynamic_sa_command_0(&sa->ctrl, SA_SAVE_HASH, SA_NOT_SAVE_IV,
SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL,
SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
SA_OPCODE_HASH, DIR_INBOUND);
- set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH,
+ set_dynamic_sa_command_1(&sa->ctrl, 0, SA_HASH_MODE_HASH,
CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
SA_SEQ_MASK_OFF, SA_MC_ENABLE,
SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
SA_NOT_COPY_HDR);
- ctx->direction = DIR_INBOUND;
- sa->sa_contents = SA_HASH160_CONTENTS;
- sa_in = (struct dynamic_sa_hash160 *) ctx->sa_in;
/* Need to zero hash digest in SA */
- memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest));
- memset(sa_in->outer_digest, 0, sizeof(sa_in->outer_digest));
- sa_in->state_ptr = ctx->state_record_dma_addr;
- ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ memset(sa->inner_digest, 0, sizeof(sa->inner_digest));
+ memset(sa->outer_digest, 0, sizeof(sa->outer_digest));
return 0;
}
@@ -240,29 +603,27 @@ int crypto4xx_hash_init(struct ahash_request *req)
int ds;
struct dynamic_sa_ctl *sa;
- sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ sa = ctx->sa_in;
ds = crypto_ahash_digestsize(
__crypto_ahash_cast(req->base.tfm));
sa->sa_command_0.bf.digest_len = ds >> 2;
sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA;
- ctx->is_hash = 1;
- ctx->direction = DIR_INBOUND;
return 0;
}
int crypto4xx_hash_update(struct ahash_request *req)
{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct scatterlist dst;
+ unsigned int ds = crypto_ahash_digestsize(ahash);
- ctx->is_hash = 1;
- ctx->hash_final = 0;
- ctx->pd_ctl = 0x11;
- ctx->direction = DIR_INBOUND;
+ sg_init_one(&dst, req->result, ds);
- return crypto4xx_build_pd(&req->base, ctx, req->src,
- (struct scatterlist *) req->result,
- req->nbytes, NULL, 0);
+ return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
+ req->nbytes, NULL, 0, ctx->sa_in,
+ ctx->sa_len, 0);
}
int crypto4xx_hash_final(struct ahash_request *req)
@@ -272,15 +633,16 @@ int crypto4xx_hash_final(struct ahash_request *req)
int crypto4xx_hash_digest(struct ahash_request *req)
{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct scatterlist dst;
+ unsigned int ds = crypto_ahash_digestsize(ahash);
- ctx->hash_final = 1;
- ctx->pd_ctl = 0x11;
- ctx->direction = DIR_INBOUND;
+ sg_init_one(&dst, req->result, ds);
- return crypto4xx_build_pd(&req->base, ctx, req->src,
- (struct scatterlist *) req->result,
- req->nbytes, NULL, 0);
+ return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
+ req->nbytes, NULL, 0, ctx->sa_in,
+ ctx->sa_len, 0);
}
/**
@@ -291,5 +653,3 @@ int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1,
SA_HASH_MODE_HASH);
}
-
-
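
The hunks above move the per-request state out of the ctx: each caller now hands crypto4xx_build_pd() the IV, the prebuilt SA with its length, and the associated-data length explicitly. A minimal sketch of a cipher entry point on that interface, mirroring crypto4xx_encrypt() from the diff (kernel plumbing assumed, not part of the patch):

	static int sketch_encrypt(struct ablkcipher_request *req)
	{
		struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
		unsigned int ivlen = crypto_ablkcipher_ivsize(
			crypto_ablkcipher_reqtfm(req));
		__le32 iv[AES_IV_SIZE / 4];

		/* Convert the request IV once to the engine's little-endian words. */
		if (ivlen)
			crypto4xx_memcpy_to_le32(iv, req->info, ivlen);

		/* The outbound SA, its length and the assoclen (0 for plain
		 * ciphers) are passed explicitly; the call returns -EINPROGRESS,
		 * or -EBUSY once the ring is mostly full. */
		return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
					  req->nbytes, iv, ivlen,
					  ctx->sa_out, ctx->sa_len, 0);
	}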
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 65dc78b91dea..76f459ad2821 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -35,8 +35,14 @@
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
+#include <crypto/aead.h>
#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/gcm.h>
#include <crypto/sha.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
@@ -122,26 +128,29 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
- writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+ if (dev->is_revb) {
+ writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
+ dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
+ writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
+ dev->ce_base + CRYPTO4XX_INT_EN);
+ } else {
+ writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+ }
}
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
- ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
- &ctx->sa_in_dma_addr, GFP_ATOMIC);
+ ctx->sa_in = kzalloc(size * 4, GFP_ATOMIC);
if (ctx->sa_in == NULL)
return -ENOMEM;
- ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
- &ctx->sa_out_dma_addr, GFP_ATOMIC);
+ ctx->sa_out = kzalloc(size * 4, GFP_ATOMIC);
if (ctx->sa_out == NULL) {
- dma_free_coherent(ctx->dev->core_dev->device, size * 4,
- ctx->sa_in, ctx->sa_in_dma_addr);
+ kfree(ctx->sa_in);
+ ctx->sa_in = NULL;
return -ENOMEM;
}
- memset(ctx->sa_in, 0, size * 4);
- memset(ctx->sa_out, 0, size * 4);
ctx->sa_len = size;
return 0;
@@ -149,40 +158,13 @@ int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
- if (ctx->sa_in != NULL)
- dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
- ctx->sa_in, ctx->sa_in_dma_addr);
- if (ctx->sa_out != NULL)
- dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
- ctx->sa_out, ctx->sa_out_dma_addr);
-
- ctx->sa_in_dma_addr = 0;
- ctx->sa_out_dma_addr = 0;
+ kfree(ctx->sa_in);
+ ctx->sa_in = NULL;
+ kfree(ctx->sa_out);
+ ctx->sa_out = NULL;
ctx->sa_len = 0;
}
-u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
-{
- ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
- sizeof(struct sa_state_record),
- &ctx->state_record_dma_addr, GFP_ATOMIC);
- if (!ctx->state_record_dma_addr)
- return -ENOMEM;
- memset(ctx->state_record, 0, sizeof(struct sa_state_record));
-
- return 0;
-}
-
-void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
-{
- if (ctx->state_record != NULL)
- dma_free_coherent(ctx->dev->core_dev->device,
- sizeof(struct sa_state_record),
- ctx->state_record,
- ctx->state_record_dma_addr);
- ctx->state_record_dma_addr = 0;
-}
-
/**
* alloc memory for the gather ring
* no need to alloc buf for the ring
@@ -191,7 +173,6 @@ void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
int i;
- struct pd_uinfo *pd_uinfo;
dev->pdr = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
&dev->pdr_pa, GFP_ATOMIC);
@@ -207,9 +188,9 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
dev->pdr_pa);
return -ENOMEM;
}
- memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
+ memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
- 256 * PPC4XX_NUM_PD,
+ sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
&dev->shadow_sa_pool_pa,
GFP_ATOMIC);
if (!dev->shadow_sa_pool)
@@ -221,16 +202,17 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
if (!dev->shadow_sr_pool)
return -ENOMEM;
for (i = 0; i < PPC4XX_NUM_PD; i++) {
- pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
- sizeof(struct pd_uinfo) * i);
+ struct ce_pd *pd = &dev->pdr[i];
+ struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];
+
+ pd->sa = dev->shadow_sa_pool_pa +
+ sizeof(union shadow_sa_buf) * i;
/* alloc 256 bytes which is enough for any kind of dynamic sa */
- pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
- pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;
+ pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;
/* alloc state record */
- pd_uinfo->sr_va = dev->shadow_sr_pool +
- sizeof(struct sa_state_record) * i;
+ pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
sizeof(struct sa_state_record) * i;
}
@@ -240,13 +222,16 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
- if (dev->pdr != NULL)
+ if (dev->pdr)
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
dev->pdr, dev->pdr_pa);
+
if (dev->shadow_sa_pool)
- dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
- dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
+ dma_free_coherent(dev->core_dev->device,
+ sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
+ dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
+
if (dev->shadow_sr_pool)
dma_free_coherent(dev->core_dev->device,
sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
@@ -273,28 +258,21 @@ static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
- struct pd_uinfo *pd_uinfo;
+ struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
+ u32 tail;
unsigned long flags;
- pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
- sizeof(struct pd_uinfo) * idx);
spin_lock_irqsave(&dev->core_dev->lock, flags);
+ pd_uinfo->state = PD_ENTRY_FREE;
+
if (dev->pdr_tail != PPC4XX_LAST_PD)
dev->pdr_tail++;
else
dev->pdr_tail = 0;
- pd_uinfo->state = PD_ENTRY_FREE;
+ tail = dev->pdr_tail;
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
- return 0;
-}
-
-static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
- dma_addr_t *pd_dma, u32 idx)
-{
- *pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;
-
- return dev->pdr + sizeof(struct ce_pd) * idx;
+ return tail;
}
/**
@@ -304,14 +282,12 @@ static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
*/
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
- dev->gdr = dma_alloc_coherent(dev->core_dev->device,
- sizeof(struct ce_gd) * PPC4XX_NUM_GD,
- &dev->gdr_pa, GFP_ATOMIC);
+ dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+ &dev->gdr_pa, GFP_ATOMIC);
if (!dev->gdr)
return -ENOMEM;
- memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);
-
return 0;
}
@@ -326,10 +302,11 @@ static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
* when this function is called.
* preemption or interrupt must be disabled
*/
-u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
+static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
{
u32 retval;
u32 tmp;
+
if (n >= PPC4XX_NUM_GD)
return ERING_WAS_FULL;
@@ -372,7 +349,7 @@ static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
{
*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
- return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
+ return &dev->gdr[idx];
}
/**
@@ -383,7 +360,6 @@ static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
int i;
- struct ce_sd *sd_array;
/* alloc memory for scatter descriptor ring */
dev->sdr = dma_alloc_coherent(dev->core_dev->device,
@@ -392,10 +368,9 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
if (!dev->sdr)
return -ENOMEM;
- dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
dev->scatter_buffer_va =
dma_alloc_coherent(dev->core_dev->device,
- dev->scatter_buffer_size * PPC4XX_NUM_SD,
+ PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
&dev->scatter_buffer_pa, GFP_ATOMIC);
if (!dev->scatter_buffer_va) {
dma_free_coherent(dev->core_dev->device,
@@ -404,11 +379,9 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
return -ENOMEM;
}
- sd_array = dev->sdr;
-
for (i = 0; i < PPC4XX_NUM_SD; i++) {
- sd_array[i].ptr = dev->scatter_buffer_pa +
- dev->scatter_buffer_size * i;
+ dev->sdr[i].ptr = dev->scatter_buffer_pa +
+ PPC4XX_SD_BUFFER_SIZE * i;
}
return 0;
@@ -416,14 +389,14 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
- if (dev->sdr != NULL)
+ if (dev->sdr)
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_sd) * PPC4XX_NUM_SD,
dev->sdr, dev->sdr_pa);
- if (dev->scatter_buffer_va != NULL)
+ if (dev->scatter_buffer_va)
dma_free_coherent(dev->core_dev->device,
- dev->scatter_buffer_size * PPC4XX_NUM_SD,
+ PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
dev->scatter_buffer_va,
dev->scatter_buffer_pa);
}
@@ -477,63 +450,7 @@ static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
{
*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
- return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
-}
-
-static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
- dma_addr_t *addr, u32 *length,
- u32 *idx, u32 *offset, u32 *nbytes)
-{
- u32 len;
-
- if (*length > dev->scatter_buffer_size) {
- memcpy(phys_to_virt(*addr),
- dev->scatter_buffer_va +
- *idx * dev->scatter_buffer_size + *offset,
- dev->scatter_buffer_size);
- *offset = 0;
- *length -= dev->scatter_buffer_size;
- *nbytes -= dev->scatter_buffer_size;
- if (*idx == PPC4XX_LAST_SD)
- *idx = 0;
- else
- (*idx)++;
- *addr = *addr + dev->scatter_buffer_size;
- return 1;
- } else if (*length < dev->scatter_buffer_size) {
- memcpy(phys_to_virt(*addr),
- dev->scatter_buffer_va +
- *idx * dev->scatter_buffer_size + *offset, *length);
- if ((*offset + *length) == dev->scatter_buffer_size) {
- if (*idx == PPC4XX_LAST_SD)
- *idx = 0;
- else
- (*idx)++;
- *nbytes -= *length;
- *offset = 0;
- } else {
- *nbytes -= *length;
- *offset += *length;
- }
-
- return 0;
- } else {
- len = (*nbytes <= dev->scatter_buffer_size) ?
- (*nbytes) : dev->scatter_buffer_size;
- memcpy(phys_to_virt(*addr),
- dev->scatter_buffer_va +
- *idx * dev->scatter_buffer_size + *offset,
- len);
- *offset = 0;
- *nbytes -= len;
-
- if (*idx == PPC4XX_LAST_SD)
- *idx = 0;
- else
- (*idx)++;
-
- return 0;
- }
+ return &dev->sdr[idx];
}
static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
@@ -542,66 +459,52 @@ static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
u32 nbytes,
struct scatterlist *dst)
{
- dma_addr_t addr;
- u32 this_sd;
- u32 offset;
- u32 len;
- u32 i;
- u32 sg_len;
- struct scatterlist *sg;
+ unsigned int first_sd = pd_uinfo->first_sd;
+ unsigned int last_sd;
+ unsigned int overflow = 0;
+ unsigned int to_copy;
+ unsigned int dst_start = 0;
+
+ /*
+ * Because the scatter buffers are all neatly organized in one
+ * big continuous ringbuffer; scatterwalk_map_and_copy() can
+ * be instructed to copy a range of buffers in one go.
+ */
- this_sd = pd_uinfo->first_sd;
- offset = 0;
- i = 0;
+ last_sd = (first_sd + pd_uinfo->num_sd);
+ if (last_sd > PPC4XX_LAST_SD) {
+ last_sd = PPC4XX_LAST_SD;
+ overflow = last_sd % PPC4XX_NUM_SD;
+ }
while (nbytes) {
- sg = &dst[i];
- sg_len = sg->length;
- addr = dma_map_page(dev->core_dev->device, sg_page(sg),
- sg->offset, sg->length, DMA_TO_DEVICE);
-
- if (offset == 0) {
- len = (nbytes <= sg->length) ? nbytes : sg->length;
- while (crypto4xx_fill_one_page(dev, &addr, &len,
- &this_sd, &offset, &nbytes))
- ;
- if (!nbytes)
- return;
- i++;
- } else {
- len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
- nbytes : (dev->scatter_buffer_size - offset);
- len = (sg->length < len) ? sg->length : len;
- while (crypto4xx_fill_one_page(dev, &addr, &len,
- &this_sd, &offset, &nbytes))
- ;
- if (!nbytes)
- return;
- sg_len -= len;
- if (sg_len) {
- addr += len;
- while (crypto4xx_fill_one_page(dev, &addr,
- &sg_len, &this_sd, &offset, &nbytes))
- ;
- }
- i++;
+ void *buf = dev->scatter_buffer_va +
+ first_sd * PPC4XX_SD_BUFFER_SIZE;
+
+ to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
+ (1 + last_sd - first_sd));
+ scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
+ nbytes -= to_copy;
+
+ if (overflow) {
+ first_sd = 0;
+ last_sd = overflow;
+ dst_start += to_copy;
+ overflow = 0;
}
}
}
-static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
+static void crypto4xx_copy_digest_to_dst(void *dst,
+ struct pd_uinfo *pd_uinfo,
struct crypto4xx_ctx *ctx)
{
struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
- struct sa_state_record *state_record =
- (struct sa_state_record *) pd_uinfo->sr_va;
if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
- memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
+ memcpy(dst, pd_uinfo->sr_va->save_digest,
SA_HASH_ALG_SHA1_DIGEST_SIZE);
}
-
- return 0;
}
static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
@@ -623,7 +526,7 @@ static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
}
}
-static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
+static void crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
{
@@ -644,13 +547,13 @@ static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
dst->offset, dst->length, DMA_FROM_DEVICE);
}
crypto4xx_ret_sg_desc(dev, pd_uinfo);
- if (ablk_req->base.complete != NULL)
- ablk_req->base.complete(&ablk_req->base, 0);
- return 0;
+ if (pd_uinfo->state & PD_ENTRY_BUSY)
+ ablkcipher_request_complete(ablk_req, -EINPROGRESS);
+ ablkcipher_request_complete(ablk_req, 0);
}
-static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
+static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo)
{
struct crypto4xx_ctx *ctx;
@@ -659,62 +562,93 @@ static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
ahash_req = ahash_request_cast(pd_uinfo->async_req);
ctx = crypto_tfm_ctx(ahash_req->base.tfm);
- crypto4xx_copy_digest_to_dst(pd_uinfo,
+ crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo,
crypto_tfm_ctx(ahash_req->base.tfm));
crypto4xx_ret_sg_desc(dev, pd_uinfo);
- /* call user provided callback function x */
- if (ahash_req->base.complete != NULL)
- ahash_req->base.complete(&ahash_req->base, 0);
- return 0;
+ if (pd_uinfo->state & PD_ENTRY_BUSY)
+ ahash_request_complete(ahash_req, -EINPROGRESS);
+ ahash_request_complete(ahash_req, 0);
}
-static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
+static void crypto4xx_aead_done(struct crypto4xx_device *dev,
+ struct pd_uinfo *pd_uinfo,
+ struct ce_pd *pd)
{
- struct ce_pd *pd;
- struct pd_uinfo *pd_uinfo;
+ struct aead_request *aead_req = container_of(pd_uinfo->async_req,
+ struct aead_request, base);
+ struct scatterlist *dst = pd_uinfo->dest_va;
+ size_t cp_len = crypto_aead_authsize(
+ crypto_aead_reqtfm(aead_req));
+ u32 icv[cp_len];
+ int err = 0;
- pd = dev->pdr + sizeof(struct ce_pd)*idx;
- pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx;
- if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
- CRYPTO_ALG_TYPE_ABLKCIPHER)
- return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
- else
- return crypto4xx_ahash_done(dev, pd_uinfo);
+ if (pd_uinfo->using_sd) {
+ crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
+ pd->pd_ctl_len.bf.pkt_len,
+ dst);
+ } else {
+ __dma_sync_page(sg_page(dst), dst->offset, dst->length,
+ DMA_FROM_DEVICE);
+ }
+
+ if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
+ /* append icv at the end */
+ crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
+ cp_len);
+
+ scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
+ cp_len, 1);
+ } else {
+ /* check icv at the end */
+ scatterwalk_map_and_copy(icv, aead_req->src,
+ aead_req->assoclen + aead_req->cryptlen -
+ cp_len, cp_len, 0);
+
+ crypto4xx_memcpy_from_le32(icv, icv, cp_len);
+
+ if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
+ err = -EBADMSG;
+ }
+
+ crypto4xx_ret_sg_desc(dev, pd_uinfo);
+
+ if (pd->pd_ctl.bf.status & 0xff) {
+ if (!__ratelimit(&dev->aead_ratelimit)) {
+ if (pd->pd_ctl.bf.status & 2)
+ pr_err("pad fail error\n");
+ if (pd->pd_ctl.bf.status & 4)
+ pr_err("seqnum fail\n");
+ if (pd->pd_ctl.bf.status & 8)
+ pr_err("error _notify\n");
+ pr_err("aead return err status = 0x%02x\n",
+ pd->pd_ctl.bf.status & 0xff);
+ pr_err("pd pad_ctl = 0x%08x\n",
+ pd->pd_ctl.bf.pd_pad_ctl);
+ }
+ err = -EINVAL;
+ }
+
+ if (pd_uinfo->state & PD_ENTRY_BUSY)
+ aead_request_complete(aead_req, -EINPROGRESS);
+
+ aead_request_complete(aead_req, err);
}
-/**
- * Note: Only use this function to copy items that is word aligned.
- */
-void crypto4xx_memcpy_le(unsigned int *dst,
- const unsigned char *buf,
- int len)
+static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
- u8 *tmp;
- for (; len >= 4; buf += 4, len -= 4)
- *dst++ = cpu_to_le32(*(unsigned int *) buf);
-
- tmp = (u8 *)dst;
- switch (len) {
- case 3:
- *tmp++ = 0;
- *tmp++ = *(buf+2);
- *tmp++ = *(buf+1);
- *tmp++ = *buf;
- break;
- case 2:
- *tmp++ = 0;
- *tmp++ = 0;
- *tmp++ = *(buf+1);
- *tmp++ = *buf;
+ struct ce_pd *pd = &dev->pdr[idx];
+ struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
+
+ switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
break;
- case 1:
- *tmp++ = 0;
- *tmp++ = 0;
- *tmp++ = 0;
- *tmp++ = *buf;
+ case CRYPTO_ALG_TYPE_AEAD:
+ crypto4xx_aead_done(dev, pd_uinfo, pd);
break;
- default:
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto4xx_ahash_done(dev, pd_uinfo);
break;
}
}
@@ -729,17 +663,6 @@ static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
kfree(core_dev);
}
-void crypto4xx_return_pd(struct crypto4xx_device *dev,
- u32 pd_entry, struct ce_pd *pd,
- struct pd_uinfo *pd_uinfo)
-{
- /* irq should be already disabled */
- dev->pdr_head = pd_entry;
- pd->pd_ctl.w = 0;
- pd->pd_ctl_len.w = 0;
- pd_uinfo->state = PD_ENTRY_FREE;
-}
-
static u32 get_next_gd(u32 current)
{
if (current != PPC4XX_LAST_GD)
@@ -756,17 +679,19 @@ static u32 get_next_sd(u32 current)
return 0;
}
-u32 crypto4xx_build_pd(struct crypto_async_request *req,
+int crypto4xx_build_pd(struct crypto_async_request *req,
struct crypto4xx_ctx *ctx,
struct scatterlist *src,
struct scatterlist *dst,
- unsigned int datalen,
- void *iv, u32 iv_len)
+ const unsigned int datalen,
+ const __le32 *iv, const u32 iv_len,
+ const struct dynamic_sa_ctl *req_sa,
+ const unsigned int sa_len,
+ const unsigned int assoclen)
{
+ struct scatterlist _dst[2];
struct crypto4xx_device *dev = ctx->dev;
- dma_addr_t addr, pd_dma, sd_dma, gd_dma;
struct dynamic_sa_ctl *sa;
- struct scatterlist *sg;
struct ce_gd *gd;
struct ce_pd *pd;
u32 num_gd, num_sd;
@@ -774,22 +699,30 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
u32 fst_sd = 0xffffffff;
u32 pd_entry;
unsigned long flags;
- struct pd_uinfo *pd_uinfo = NULL;
- unsigned int nbytes = datalen, idx;
- unsigned int ivlen = 0;
+ struct pd_uinfo *pd_uinfo;
+ unsigned int nbytes = datalen;
+ size_t offset_to_sr_ptr;
u32 gd_idx = 0;
+ int tmp;
+ bool is_busy;
- /* figure how many gd is needed */
- num_gd = sg_nents_for_len(src, datalen);
- if ((int)num_gd < 0) {
+ /* figure how many gd are needed */
+ tmp = sg_nents_for_len(src, assoclen + datalen);
+ if (tmp < 0) {
dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
- return -EINVAL;
+ return tmp;
}
- if (num_gd == 1)
- num_gd = 0;
+ if (tmp == 1)
+ tmp = 0;
+ num_gd = tmp;
- /* figure how many sd is needed */
- if (sg_is_last(dst) || ctx->is_hash) {
+ if (assoclen) {
+ nbytes += assoclen;
+ dst = scatterwalk_ffwd(_dst, dst, assoclen);
+ }
+
+ /* figure how many sd are needed */
+ if (sg_is_last(dst)) {
num_sd = 0;
} else {
if (datalen > PPC4XX_SD_BUFFER_SIZE) {
@@ -808,6 +741,31 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
* already got must be return the original place.
*/
spin_lock_irqsave(&dev->core_dev->lock, flags);
+ /*
+ * Let the caller know to slow down, once more than 13/16ths = 81%
+ * of the available data contexts are being used simultaneously.
+ *
+ * With PPC4XX_NUM_PD = 256, this will leave a "backlog queue" for
+ * 31 more contexts. Before new requests have to be rejected.
+ */
+ if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
+ is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
+ ((PPC4XX_NUM_PD * 13) / 16);
+ } else {
+ /*
+ * To fix contention issues between ipsec (no blacklog) and
+ * dm-crypto (backlog) reserve 32 entries for "no backlog"
+ * data contexts.
+ */
+ is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
+ ((PPC4XX_NUM_PD * 15) / 16);
+
+ if (is_busy) {
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+ return -EBUSY;
+ }
+ }
+
if (num_gd) {
fst_gd = crypto4xx_get_n_gd(dev, num_gd);
if (fst_gd == ERING_WAS_FULL) {
@@ -835,38 +793,28 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
}
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
- pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
- sizeof(struct pd_uinfo) * pd_entry);
- pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
+ pd = &dev->pdr[pd_entry];
+ pd->sa_len = sa_len;
+
+ pd_uinfo = &dev->pdr_uinfo[pd_entry];
pd_uinfo->async_req = req;
pd_uinfo->num_gd = num_gd;
pd_uinfo->num_sd = num_sd;
- if (iv_len || ctx->is_hash) {
- ivlen = iv_len;
- pd->sa = pd_uinfo->sa_pa;
- sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
- if (ctx->direction == DIR_INBOUND)
- memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
- else
- memcpy(sa, ctx->sa_out, ctx->sa_len * 4);
+ if (iv_len)
+ memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
- memcpy((void *) sa + ctx->offset_to_sr_ptr,
- &pd_uinfo->sr_pa, 4);
+ sa = pd_uinfo->sa_va;
+ memcpy(sa, req_sa, sa_len * 4);
+
+ sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
+ offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
+ *(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;
- if (iv_len)
- crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
- } else {
- if (ctx->direction == DIR_INBOUND) {
- pd->sa = ctx->sa_in_dma_addr;
- sa = (struct dynamic_sa_ctl *) ctx->sa_in;
- } else {
- pd->sa = ctx->sa_out_dma_addr;
- sa = (struct dynamic_sa_ctl *) ctx->sa_out;
- }
- }
- pd->sa_len = ctx->sa_len;
if (num_gd) {
+ dma_addr_t gd_dma;
+ struct scatterlist *sg;
+
/* get first gd we are going to use */
gd_idx = fst_gd;
pd_uinfo->first_gd = fst_gd;
@@ -875,27 +823,30 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
pd->src = gd_dma;
/* enable gather */
sa->sa_command_0.bf.gather = 1;
- idx = 0;
- src = &src[0];
/* walk the sg, and setup gather array */
+
+ sg = src;
while (nbytes) {
- sg = &src[idx];
- addr = dma_map_page(dev->core_dev->device, sg_page(sg),
- sg->offset, sg->length, DMA_TO_DEVICE);
- gd->ptr = addr;
- gd->ctl_len.len = sg->length;
+ size_t len;
+
+ len = min(sg->length, nbytes);
+ gd->ptr = dma_map_page(dev->core_dev->device,
+ sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
+ gd->ctl_len.len = len;
gd->ctl_len.done = 0;
gd->ctl_len.ready = 1;
- if (sg->length >= nbytes)
+ if (len >= nbytes)
break;
+
nbytes -= sg->length;
gd_idx = get_next_gd(gd_idx);
gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
- idx++;
+ sg = sg_next(sg);
}
} else {
pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
- src->offset, src->length, DMA_TO_DEVICE);
+ src->offset, min(nbytes, src->length),
+ DMA_TO_DEVICE);
/*
* Disable gather in sa command
*/
@@ -906,25 +857,24 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
pd_uinfo->first_gd = 0xffffffff;
pd_uinfo->num_gd = 0;
}
- if (ctx->is_hash || sg_is_last(dst)) {
+ if (sg_is_last(dst)) {
/*
* we know application give us dst a whole piece of memory
* no need to use scatter ring.
- * In case of is_hash, the icv is always at end of src data.
*/
pd_uinfo->using_sd = 0;
pd_uinfo->first_sd = 0xffffffff;
pd_uinfo->num_sd = 0;
pd_uinfo->dest_va = dst;
sa->sa_command_0.bf.scatter = 0;
- if (ctx->is_hash)
- pd->dest = virt_to_phys((void *)dst);
- else
- pd->dest = (u32)dma_map_page(dev->core_dev->device,
- sg_page(dst), dst->offset,
- dst->length, DMA_TO_DEVICE);
+ pd->dest = (u32)dma_map_page(dev->core_dev->device,
+ sg_page(dst), dst->offset,
+ min(datalen, dst->length),
+ DMA_TO_DEVICE);
} else {
+ dma_addr_t sd_dma;
struct ce_sd *sd = NULL;
+
u32 sd_idx = fst_sd;
nbytes = datalen;
sa->sa_command_0.bf.scatter = 1;
@@ -938,7 +888,6 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
sd->ctl.done = 0;
sd->ctl.rdy = 1;
/* sd->ptr should be setup by sd_init routine*/
- idx = 0;
if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
nbytes -= PPC4XX_SD_BUFFER_SIZE;
else
@@ -949,67 +898,97 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
/* setup scatter descriptor */
sd->ctl.done = 0;
sd->ctl.rdy = 1;
- if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
+ if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
nbytes -= PPC4XX_SD_BUFFER_SIZE;
- else
+ } else {
/*
* SD entry can hold PPC4XX_SD_BUFFER_SIZE,
* which is more than nbytes, so done.
*/
nbytes = 0;
+ }
}
}
- sa->sa_command_1.bf.hash_crypto_offset = 0;
- pd->pd_ctl.w = ctx->pd_ctl;
- pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
- pd_uinfo->state = PD_ENTRY_INUSE;
+ pd->pd_ctl.w = PD_CTL_HOST_READY |
+ ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
+ (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
+ PD_CTL_HASH_FINAL : 0);
+ pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
+ pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
+
wmb();
/* write any value to push engine to read a pd */
+ writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
- return -EINPROGRESS;
+ return is_busy ? -EBUSY : -EINPROGRESS;
}
/**
* Algorithm Registration Functions
*/
-static int crypto4xx_alg_init(struct crypto_tfm *tfm)
+static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
+ struct crypto4xx_ctx *ctx)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
-
ctx->dev = amcc_alg->dev;
ctx->sa_in = NULL;
ctx->sa_out = NULL;
- ctx->sa_in_dma_addr = 0;
- ctx->sa_out_dma_addr = 0;
ctx->sa_len = 0;
+}
- switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
- default:
- tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
- break;
- case CRYPTO_ALG_TYPE_AHASH:
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct crypto4xx_ctx));
- break;
- }
+static int crypto4xx_ablk_init(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *amcc_alg;
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
+ crypto4xx_ctx_init(amcc_alg, ctx);
+ tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
return 0;
}
-static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
+static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
{
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
-
crypto4xx_free_sa(ctx);
- crypto4xx_free_state_record(ctx);
}
-int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
- struct crypto4xx_alg_common *crypto_alg,
- int array_size)
+static void crypto4xx_ablk_exit(struct crypto_tfm *tfm)
+{
+ crypto4xx_common_exit(crypto_tfm_ctx(tfm));
+}
+
+static int crypto4xx_aead_init(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto4xx_alg *amcc_alg;
+
+ ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->sw_cipher.aead))
+ return PTR_ERR(ctx->sw_cipher.aead);
+
+ amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
+ crypto4xx_ctx_init(amcc_alg, ctx);
+ crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
+ max(sizeof(struct crypto4xx_ctx), 32 +
+ crypto_aead_reqsize(ctx->sw_cipher.aead)));
+ return 0;
+}
+
+static void crypto4xx_aead_exit(struct crypto_aead *tfm)
+{
+ struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto4xx_common_exit(ctx);
+ crypto_free_aead(ctx->sw_cipher.aead);
+}
+
+static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
+ struct crypto4xx_alg_common *crypto_alg,
+ int array_size)
{
struct crypto4xx_alg *alg;
int i;
@@ -1024,6 +1003,10 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
alg->dev = sec_dev;
switch (alg->alg.type) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ rc = crypto_register_aead(&alg->alg.u.aead);
+ break;
+
case CRYPTO_ALG_TYPE_AHASH:
rc = crypto_register_ahash(&alg->alg.u.hash);
break;
@@ -1033,12 +1016,10 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
break;
}
- if (rc) {
- list_del(&alg->entry);
+ if (rc)
kfree(alg);
- } else {
+ else
list_add_tail(&alg->entry, &sec_dev->alg_list);
- }
}
return 0;
@@ -1055,6 +1036,10 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
crypto_unregister_ahash(&alg->alg.u.hash);
break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ crypto_unregister_aead(&alg->alg.u.aead);
+ break;
+
default:
crypto_unregister_alg(&alg->alg.u.cipher);
}
@@ -1068,60 +1053,68 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
struct pd_uinfo *pd_uinfo;
struct ce_pd *pd;
- u32 tail;
-
- while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
- tail = core_dev->dev->pdr_tail;
- pd_uinfo = core_dev->dev->pdr_uinfo +
- sizeof(struct pd_uinfo)*tail;
- pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
- if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
- pd->pd_ctl.bf.pe_done &&
- !pd->pd_ctl.bf.host_ready) {
- pd->pd_ctl.bf.pe_done = 0;
+ u32 tail = core_dev->dev->pdr_tail;
+ u32 head = core_dev->dev->pdr_head;
+
+ do {
+ pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
+ pd = &core_dev->dev->pdr[tail];
+ if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
+ ((READ_ONCE(pd->pd_ctl.w) &
+ (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
+ PD_CTL_PE_DONE)) {
crypto4xx_pd_done(core_dev->dev, tail);
- crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
- pd_uinfo->state = PD_ENTRY_FREE;
+ tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
} else {
/* if tail not done, break */
break;
}
- }
+ } while (head != tail);
}
/**
* Top Half of isr.
*/
-static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
+static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
+ u32 clr_val)
{
struct device *dev = (struct device *)data;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
- if (!core_dev->dev->ce_base)
- return 0;
-
- writel(PPC4XX_INTERRUPT_CLR,
- core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
+ writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
tasklet_schedule(&core_dev->tasklet);
return IRQ_HANDLED;
}
+static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
+{
+ return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
+}
+
+static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
+{
+ return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
+ PPC4XX_TMO_ERR_INT);
+}
+
/**
* Supported Crypto Algorithms
*/
-struct crypto4xx_alg_common crypto4xx_alg[] = {
+static struct crypto4xx_alg_common crypto4xx_alg[] = {
/* Crypto AES modes */
{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_alg_init,
- .cra_exit = crypto4xx_alg_exit,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
.cra_module = THIS_MODULE,
.cra_u = {
.ablkcipher = {
@@ -1134,6 +1127,147 @@ struct crypto4xx_alg_common crypto4xx_alg[] = {
}
}
}},
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "cfb(aes)",
+ .cra_driver_name = "cfb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_cfb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ } },
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setkey = crypto4xx_setkey_rfc3686,
+ .encrypt = crypto4xx_rfc3686_encrypt,
+ .decrypt = crypto4xx_rfc3686_decrypt,
+ }
+ }
+ } },
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = crypto4xx_setkey_aes_ecb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ } },
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "ofb(aes)",
+ .cra_driver_name = "ofb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_ofb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ } },
+
+ /* AEAD */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
+ .setkey = crypto4xx_setkey_aes_ccm,
+ .setauthsize = crypto4xx_setauthsize_aead,
+ .encrypt = crypto4xx_encrypt_aes_ccm,
+ .decrypt = crypto4xx_decrypt_aes_ccm,
+ .init = crypto4xx_aead_init,
+ .exit = crypto4xx_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = 16,
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ } },
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
+ .setkey = crypto4xx_setkey_aes_gcm,
+ .setauthsize = crypto4xx_setauthsize_aead,
+ .encrypt = crypto4xx_encrypt_aes_gcm,
+ .decrypt = crypto4xx_decrypt_aes_gcm,
+ .init = crypto4xx_aead_init,
+ .exit = crypto4xx_aead_exit,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = 16,
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ } },
};
/**
@@ -1145,6 +1279,8 @@ static int crypto4xx_probe(struct platform_device *ofdev)
struct resource res;
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev;
+ u32 pvr;
+ bool is_revb = true;
rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
if (rc)
@@ -1161,6 +1297,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
mtdcri(SDR0, PPC405EX_SDR0_SRST,
mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
+ is_revb = false;
} else if (of_find_compatible_node(NULL, NULL,
"amcc,ppc460sx-crypto")) {
mtdcri(SDR0, PPC460SX_SDR0_SRST,
@@ -1183,17 +1320,33 @@ static int crypto4xx_probe(struct platform_device *ofdev)
if (!core_dev->dev)
goto err_alloc_dev;
+	/*
+	 * Older revisions of the 460EX/GT have a hardware bug and hence
+	 * do not support H/W-based security interrupt coalescing.
+	 */
+ pvr = mfspr(SPRN_PVR);
+ if (is_revb && ((pvr >> 4) == 0x130218A)) {
+ u32 min = PVR_MIN(pvr);
+
+ if (min < 4) {
+ dev_info(dev, "RevA detected - disable interrupt coalescing\n");
+ is_revb = false;
+ }
+ }
+
core_dev->dev->core_dev = core_dev;
+ core_dev->dev->is_revb = is_revb;
core_dev->device = dev;
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
+ ratelimit_default_init(&core_dev->dev->aead_ratelimit);
rc = crypto4xx_build_pdr(core_dev->dev);
if (rc)
goto err_build_pdr;
rc = crypto4xx_build_gdr(core_dev->dev);
if (rc)
- goto err_build_gdr;
+ goto err_build_pdr;
rc = crypto4xx_build_sdr(core_dev->dev);
if (rc)
@@ -1203,13 +1356,6 @@ static int crypto4xx_probe(struct platform_device *ofdev)
tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
(unsigned long) dev);
- /* Register for Crypto isr, Crypto Engine IRQ */
- core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
- core_dev->dev->name, dev);
- if (rc)
- goto err_request_irq;
-
core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
if (!core_dev->dev->ce_base) {
dev_err(dev, "failed to of_iomap\n");
@@ -1217,6 +1363,15 @@ static int crypto4xx_probe(struct platform_device *ofdev)
goto err_iomap;
}
+ /* Register for Crypto isr, Crypto Engine IRQ */
+ core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ rc = request_irq(core_dev->irq, is_revb ?
+ crypto4xx_ce_interrupt_handler_revb :
+ crypto4xx_ce_interrupt_handler, 0,
+ KBUILD_MODNAME, dev);
+ if (rc)
+ goto err_request_irq;
+
/* need to setup pdr, rdr, gdr and sdr before this */
crypto4xx_hw_init(core_dev->dev);
@@ -1230,18 +1385,17 @@ static int crypto4xx_probe(struct platform_device *ofdev)
return 0;
err_start_dev:
- iounmap(core_dev->dev->ce_base);
-err_iomap:
free_irq(core_dev->irq, dev);
err_request_irq:
irq_dispose_mapping(core_dev->irq);
+ iounmap(core_dev->dev->ce_base);
+err_iomap:
tasklet_kill(&core_dev->tasklet);
- crypto4xx_destroy_sdr(core_dev->dev);
err_build_sdr:
+ crypto4xx_destroy_sdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
-err_build_gdr:
- crypto4xx_destroy_pdr(core_dev->dev);
err_build_pdr:
+ crypto4xx_destroy_pdr(core_dev->dev);
kfree(core_dev->dev);
err_alloc_dev:
kfree(core_dev);
@@ -1276,7 +1430,7 @@ MODULE_DEVICE_TABLE(of, crypto4xx_match);
static struct platform_driver crypto4xx_driver = {
.driver = {
- .name = MODULE_NAME,
+ .name = KBUILD_MODNAME,
.of_match_table = crypto4xx_match,
},
.probe = crypto4xx_probe,
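A note on the reworked tasklet above: the completion test now reads the whole pd_ctl word once and checks two bits together instead of poking individual bitfields. A minimal illustrative helper (not part of the patch; the function name is hypothetical and it only restates the predicate used in crypto4xx_bh_tasklet_cb with the bit names added to crypto4xx_reg_def.h):

static bool example_pd_is_done(struct ce_pd *pd)
{
	/* finished when the engine has set PE_DONE and HOST_READY is clear */
	u32 ctl = READ_ONCE(pd->pd_ctl.w);

	return (ctl & (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) == PD_CTL_PE_DONE;
}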
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index ecfdcfe3698d..23b726da6534 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -22,9 +22,11 @@
#ifndef __CRYPTO4XX_CORE_H__
#define __CRYPTO4XX_CORE_H__
+#include <linux/ratelimit.h>
#include <crypto/internal/hash.h>
-
-#define MODULE_NAME "crypto4xx"
+#include <crypto/internal/aead.h>
+#include "crypto4xx_reg_def.h"
+#include "crypto4xx_sa.h"
#define PPC460SX_SDR0_SRST 0x201
#define PPC405EX_SDR0_SRST 0x200
@@ -34,20 +36,28 @@
#define PPC405EX_CE_RESET 0x00000008
#define CRYPTO4XX_CRYPTO_PRIORITY 300
-#define PPC4XX_LAST_PD 63
-#define PPC4XX_NUM_PD 64
-#define PPC4XX_LAST_GD 1023
+#define PPC4XX_NUM_PD 256
+#define PPC4XX_LAST_PD (PPC4XX_NUM_PD - 1)
#define PPC4XX_NUM_GD 1024
-#define PPC4XX_LAST_SD 63
-#define PPC4XX_NUM_SD 64
+#define PPC4XX_LAST_GD (PPC4XX_NUM_GD - 1)
+#define PPC4XX_NUM_SD 256
+#define PPC4XX_LAST_SD (PPC4XX_NUM_SD - 1)
#define PPC4XX_SD_BUFFER_SIZE 2048
-#define PD_ENTRY_INUSE 1
+#define PD_ENTRY_BUSY BIT(1)
+#define PD_ENTRY_INUSE BIT(0)
#define PD_ENTRY_FREE 0
#define ERING_WAS_FULL 0xffffffff
struct crypto4xx_device;
+union shadow_sa_buf {
+ struct dynamic_sa_ctl sa;
+
+ /* alloc 256 bytes which is enough for any kind of dynamic sa */
+ u8 buf[256];
+} __packed;
+
struct pd_uinfo {
struct crypto4xx_device *dev;
u32 state;
@@ -60,9 +70,8 @@ struct pd_uinfo {
used by this packet */
u32 num_sd; /* number of scatter descriptors
used by this packet */
- void *sa_va; /* shadow sa, when using cp from ctx->sa */
- u32 sa_pa;
- void *sr_va; /* state record for shadow sa */
+ struct dynamic_sa_ctl *sa_va; /* shadow sa */
+ struct sa_state_record *sr_va; /* state record for shadow sa */
u32 sr_pa;
struct scatterlist *dest_va;
struct crypto_async_request *async_req; /* base crypto request
@@ -71,28 +80,21 @@ struct pd_uinfo {
struct crypto4xx_device {
struct crypto4xx_core_device *core_dev;
- char *name;
- u64 ce_phy_address;
void __iomem *ce_base;
void __iomem *trng_base;
- void *pdr; /* base address of packet
- descriptor ring */
- dma_addr_t pdr_pa; /* physical address used to
- program ce pdr_base_register */
- void *gdr; /* gather descriptor ring */
- dma_addr_t gdr_pa; /* physical address used to
- program ce gdr_base_register */
- void *sdr; /* scatter descriptor ring */
- dma_addr_t sdr_pa; /* physical address used to
- program ce sdr_base_register */
+ struct ce_pd *pdr; /* base address of packet descriptor ring */
+ dma_addr_t pdr_pa; /* physical address of pdr_base_register */
+ struct ce_gd *gdr; /* gather descriptor ring */
+ dma_addr_t gdr_pa; /* physical address of gdr_base_register */
+ struct ce_sd *sdr; /* scatter descriptor ring */
+ dma_addr_t sdr_pa; /* physical address of sdr_base_register */
void *scatter_buffer_va;
dma_addr_t scatter_buffer_pa;
- u32 scatter_buffer_size;
- void *shadow_sa_pool; /* pool of memory for sa in pd_uinfo */
+ union shadow_sa_buf *shadow_sa_pool;
dma_addr_t shadow_sa_pool_pa;
- void *shadow_sr_pool; /* pool of memory for sr in pd_uinfo */
+ struct sa_state_record *shadow_sr_pool;
dma_addr_t shadow_sr_pool_pa;
u32 pdr_tail;
u32 pdr_head;
@@ -100,9 +102,11 @@ struct crypto4xx_device {
u32 gdr_head;
u32 sdr_tail;
u32 sdr_head;
- void *pdr_uinfo;
+ struct pd_uinfo *pdr_uinfo;
struct list_head alg_list; /* List of algorithms supported
by this device */
+ struct ratelimit_state aead_ratelimit;
+ bool is_revb;
};
struct crypto4xx_core_device {
@@ -118,30 +122,13 @@ struct crypto4xx_core_device {
struct crypto4xx_ctx {
struct crypto4xx_device *dev;
- void *sa_in;
- dma_addr_t sa_in_dma_addr;
- void *sa_out;
- dma_addr_t sa_out_dma_addr;
- void *state_record;
- dma_addr_t state_record_dma_addr;
+ struct dynamic_sa_ctl *sa_in;
+ struct dynamic_sa_ctl *sa_out;
+ __le32 iv_nonce;
u32 sa_len;
- u32 offset_to_sr_ptr; /* offset to state ptr, in dynamic sa */
- u32 direction;
- u32 next_hdr;
- u32 save_iv;
- u32 pd_ctl_len;
- u32 pd_ctl;
- u32 bypass;
- u32 is_hash;
- u32 hash_final;
-};
-
-struct crypto4xx_req_ctx {
- struct crypto4xx_device *dev; /* Device in which
- operation to send to */
- void *sa;
- u32 sa_dma_addr;
- u16 sa_len;
+ union {
+ struct crypto_aead *aead;
+ } sw_cipher;
};
struct crypto4xx_alg_common {
@@ -149,6 +136,7 @@ struct crypto4xx_alg_common {
union {
struct crypto_alg cipher;
struct ahash_alg hash;
+ struct aead_alg aead;
} u;
};
@@ -158,43 +146,90 @@ struct crypto4xx_alg {
struct crypto4xx_device *dev;
};
-static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg(
- struct crypto_alg *x)
+int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
+void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
+void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
+int crypto4xx_build_pd(struct crypto_async_request *req,
+ struct crypto4xx_ctx *ctx,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ const unsigned int datalen,
+ const __le32 *iv, const u32 iv_len,
+ const struct dynamic_sa_ctl *sa,
+ const unsigned int sa_len,
+ const unsigned int assoclen);
+int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt(struct ablkcipher_request *req);
+int crypto4xx_decrypt(struct ablkcipher_request *req);
+int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req);
+int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req);
+int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
+int crypto4xx_hash_digest(struct ahash_request *req);
+int crypto4xx_hash_final(struct ahash_request *req);
+int crypto4xx_hash_update(struct ahash_request *req);
+int crypto4xx_hash_init(struct ahash_request *req);
+
+/**
+ * Note: Only use this function to copy items that are word aligned.
+ */
+static inline void crypto4xx_memcpy_swab32(u32 *dst, const void *buf,
+ size_t len)
{
- switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_AHASH:
- return container_of(__crypto_ahash_alg(x),
- struct crypto4xx_alg, alg.u.hash);
+ for (; len >= 4; buf += 4, len -= 4)
+ *dst++ = __swab32p((u32 *) buf);
+
+ if (len) {
+ const u8 *tmp = (u8 *)buf;
+
+ switch (len) {
+ case 3:
+ *dst = (tmp[2] << 16) |
+ (tmp[1] << 8) |
+ tmp[0];
+ break;
+ case 2:
+ *dst = (tmp[1] << 8) |
+ tmp[0];
+ break;
+ case 1:
+ *dst = tmp[0];
+ break;
+ default:
+ break;
+ }
}
+}
- return container_of(x, struct crypto4xx_alg, alg.u.cipher);
+static inline void crypto4xx_memcpy_from_le32(u32 *dst, const void *buf,
+ size_t len)
+{
+ crypto4xx_memcpy_swab32(dst, buf, len);
}
-extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
-extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
-extern u32 crypto4xx_alloc_sa_rctx(struct crypto4xx_ctx *ctx,
- struct crypto4xx_ctx *rctx);
-extern void crypto4xx_free_sa_rctx(struct crypto4xx_ctx *rctx);
-extern void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
-extern u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
-extern u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx);
-extern u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx);
-extern u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx);
-extern void crypto4xx_memcpy_le(unsigned int *dst,
- const unsigned char *buf, int len);
-extern u32 crypto4xx_build_pd(struct crypto_async_request *req,
- struct crypto4xx_ctx *ctx,
- struct scatterlist *src,
- struct scatterlist *dst,
- unsigned int datalen,
- void *iv, u32 iv_len);
-extern int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
- const u8 *key, unsigned int keylen);
-extern int crypto4xx_encrypt(struct ablkcipher_request *req);
-extern int crypto4xx_decrypt(struct ablkcipher_request *req);
-extern int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
-extern int crypto4xx_hash_digest(struct ahash_request *req);
-extern int crypto4xx_hash_final(struct ahash_request *req);
-extern int crypto4xx_hash_update(struct ahash_request *req);
-extern int crypto4xx_hash_init(struct ahash_request *req);
+static inline void crypto4xx_memcpy_to_le32(__le32 *dst, const void *buf,
+ size_t len)
+{
+ crypto4xx_memcpy_swab32((u32 *)dst, buf, len);
+}
+
+int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
+ unsigned int authsize);
+int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_decrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt_aes_gcm(struct aead_request *req);
+int crypto4xx_decrypt_aes_gcm(struct aead_request *req);
+
#endif
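A hedged usage sketch for the new copy helpers declared above (illustrative only; the wrapper name is hypothetical, the called functions are the ones from this header and crypto4xx_sa.h): loading a byte-string key into the little-endian key words of a dynamic SA.

static void example_load_sa_key(struct dynamic_sa_ctl *sa,
				const u8 *key, unsigned int keylen)
{
	/* key words sit right behind the SA control words; swab per 32 bits */
	crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa), key, keylen);
}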
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index 279b8725559f..472331787e04 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -121,13 +121,15 @@
#define PPC4XX_PD_SIZE 6
#define PPC4XX_CTX_DONE_INT 0x2000
#define PPC4XX_PD_DONE_INT 0x8000
+#define PPC4XX_TMO_ERR_INT 0x40000
#define PPC4XX_BYTE_ORDER 0x22222
#define PPC4XX_INTERRUPT_CLR 0x3ffff
#define PPC4XX_PRNG_CTRL_AUTO_EN 0x3
#define PPC4XX_DC_3DES_EN 1
#define PPC4XX_TRNG_EN 0x00020000
-#define PPC4XX_INT_DESCR_CNT 4
+#define PPC4XX_INT_DESCR_CNT 7
#define PPC4XX_INT_TIMEOUT_CNT 0
+#define PPC4XX_INT_TIMEOUT_CNT_REVB 0x3FF
#define PPC4XX_INT_CFG 1
/**
 * all of the following defines are ad hoc
@@ -261,6 +263,9 @@ union ce_pd_ctl {
} bf;
u32 w;
} __attribute__((packed));
+#define PD_CTL_HASH_FINAL BIT(4)
+#define PD_CTL_PE_DONE BIT(1)
+#define PD_CTL_HOST_READY BIT(0)
union ce_pd_ctl_len {
struct {
diff --git a/drivers/crypto/amcc/crypto4xx_sa.c b/drivers/crypto/amcc/crypto4xx_sa.c
deleted file mode 100644
index 69182e2cc3ea..000000000000
--- a/drivers/crypto/amcc/crypto4xx_sa.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * AMCC SoC PPC4xx Crypto Driver
- *
- * Copyright (c) 2008 Applied Micro Circuits Corporation.
- * All rights reserved. James Hsiao <jhsiao@amcc.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * @file crypto4xx_sa.c
- *
- * This file implements the security context
- * associate format.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/mod_devicetable.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock_types.h>
-#include <linux/highmem.h>
-#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <crypto/des.h>
-#include "crypto4xx_reg_def.h"
-#include "crypto4xx_sa.h"
-#include "crypto4xx_core.h"
-
-u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx)
-{
- u32 offset;
- union dynamic_sa_contents cts;
-
- if (ctx->direction == DIR_INBOUND)
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
- else
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
- offset = cts.bf.key_size
- + cts.bf.inner_size
- + cts.bf.outer_size
- + cts.bf.spi
- + cts.bf.seq_num0
- + cts.bf.seq_num1
- + cts.bf.seq_num_mask0
- + cts.bf.seq_num_mask1
- + cts.bf.seq_num_mask2
- + cts.bf.seq_num_mask3
- + cts.bf.iv0
- + cts.bf.iv1
- + cts.bf.iv2
- + cts.bf.iv3;
-
- return sizeof(struct dynamic_sa_ctl) + offset * 4;
-}
-
-u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx)
-{
- union dynamic_sa_contents cts;
-
- if (ctx->direction == DIR_INBOUND)
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
- else
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
- return (cts.bf.iv0 + cts.bf.iv1 + cts.bf.iv2 + cts.bf.iv3) * 4;
-}
-
-u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx)
-{
- union dynamic_sa_contents cts;
-
- if (ctx->direction == DIR_INBOUND)
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
- else
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
-
- return sizeof(struct dynamic_sa_ctl);
-}
diff --git a/drivers/crypto/amcc/crypto4xx_sa.h b/drivers/crypto/amcc/crypto4xx_sa.h
index 1352d58d4e34..a4d403528db5 100644
--- a/drivers/crypto/amcc/crypto4xx_sa.h
+++ b/drivers/crypto/amcc/crypto4xx_sa.h
@@ -55,6 +55,8 @@ union dynamic_sa_contents {
#define SA_OP_GROUP_BASIC 0
#define SA_OPCODE_ENCRYPT 0
#define SA_OPCODE_DECRYPT 0
+#define SA_OPCODE_ENCRYPT_HASH 1
+#define SA_OPCODE_HASH_DECRYPT 1
#define SA_OPCODE_HASH 3
#define SA_CIPHER_ALG_DES 0
#define SA_CIPHER_ALG_3DES 1
@@ -65,6 +67,8 @@ union dynamic_sa_contents {
#define SA_HASH_ALG_MD5 0
#define SA_HASH_ALG_SHA1 1
+#define SA_HASH_ALG_GHASH 12
+#define SA_HASH_ALG_CBC_MAC 14
#define SA_HASH_ALG_NULL 15
#define SA_HASH_ALG_SHA1_DIGEST_SIZE 20
@@ -112,6 +116,9 @@ union sa_command_0 {
#define CRYPTO_MODE_ECB 0
#define CRYPTO_MODE_CBC 1
+#define CRYPTO_MODE_OFB 2
+#define CRYPTO_MODE_CFB 3
+#define CRYPTO_MODE_CTR 4
#define CRYPTO_FEEDBACK_MODE_NO_FB 0
#define CRYPTO_FEEDBACK_MODE_64BIT_OFB 0
@@ -169,7 +176,7 @@ union sa_command_1 {
} __attribute__((packed));
struct dynamic_sa_ctl {
- u32 sa_contents;
+ union dynamic_sa_contents sa_contents;
union sa_command_0 sa_command_0;
union sa_command_1 sa_command_1;
} __attribute__((packed));
@@ -178,9 +185,12 @@ struct dynamic_sa_ctl {
* State Record for Security Association (SA)
*/
struct sa_state_record {
- u32 save_iv[4];
- u32 save_hash_byte_cnt[2];
- u32 save_digest[16];
+ __le32 save_iv[4];
+ __le32 save_hash_byte_cnt[2];
+ union {
+ u32 save_digest[16]; /* for MD5/SHA */
+ __le32 save_digest_le32[16]; /* GHASH / CBC */
+ };
} __attribute__((packed));
/**
@@ -189,8 +199,8 @@ struct sa_state_record {
*/
struct dynamic_sa_aes128 {
struct dynamic_sa_ctl ctrl;
- u32 key[4];
- u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ __le32 key[4];
+	__le32 iv[4]; /* for CBC, OFB, and CFB mode */
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
@@ -203,8 +213,8 @@ struct dynamic_sa_aes128 {
*/
struct dynamic_sa_aes192 {
struct dynamic_sa_ctl ctrl;
- u32 key[6];
- u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ __le32 key[6];
+	__le32 iv[4]; /* for CBC, OFB, and CFB mode */
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
@@ -217,8 +227,8 @@ struct dynamic_sa_aes192 {
*/
struct dynamic_sa_aes256 {
struct dynamic_sa_ctl ctrl;
- u32 key[8];
- u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ __le32 key[8];
+	__le32 iv[4]; /* for CBC, OFB, and CFB mode */
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
@@ -228,16 +238,81 @@ struct dynamic_sa_aes256 {
#define SA_AES_CONTENTS 0x3e000002
/**
+ * Security Association (SA) for AES128 CCM
+ */
+struct dynamic_sa_aes128_ccm {
+ struct dynamic_sa_ctl ctrl;
+ __le32 key[4];
+ __le32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __packed;
+#define SA_AES128_CCM_LEN (sizeof(struct dynamic_sa_aes128_ccm)/4)
+#define SA_AES128_CCM_CONTENTS 0x3e000042
+#define SA_AES_CCM_CONTENTS 0x3e000002
+
+/**
+ * Security Association (SA) for AES128_GCM
+ */
+struct dynamic_sa_aes128_gcm {
+ struct dynamic_sa_ctl ctrl;
+ __le32 key[4];
+ __le32 inner_digest[4];
+ __le32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __packed;
+
+#define SA_AES128_GCM_LEN (sizeof(struct dynamic_sa_aes128_gcm)/4)
+#define SA_AES128_GCM_CONTENTS 0x3e000442
+#define SA_AES_GCM_CONTENTS 0x3e000402
+
+/**
* Security Association (SA) for HASH160: HMAC-SHA1
*/
struct dynamic_sa_hash160 {
struct dynamic_sa_ctl ctrl;
- u32 inner_digest[5];
- u32 outer_digest[5];
+ __le32 inner_digest[5];
+ __le32 outer_digest[5];
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
#define SA_HASH160_LEN (sizeof(struct dynamic_sa_hash160)/4)
#define SA_HASH160_CONTENTS 0x2000a502
+static inline u32
+get_dynamic_sa_offset_state_ptr_field(struct dynamic_sa_ctl *cts)
+{
+ u32 offset;
+
+ offset = cts->sa_contents.bf.key_size
+ + cts->sa_contents.bf.inner_size
+ + cts->sa_contents.bf.outer_size
+ + cts->sa_contents.bf.spi
+ + cts->sa_contents.bf.seq_num0
+ + cts->sa_contents.bf.seq_num1
+ + cts->sa_contents.bf.seq_num_mask0
+ + cts->sa_contents.bf.seq_num_mask1
+ + cts->sa_contents.bf.seq_num_mask2
+ + cts->sa_contents.bf.seq_num_mask3
+ + cts->sa_contents.bf.iv0
+ + cts->sa_contents.bf.iv1
+ + cts->sa_contents.bf.iv2
+ + cts->sa_contents.bf.iv3;
+
+ return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
+static inline __le32 *get_dynamic_sa_key_field(struct dynamic_sa_ctl *cts)
+{
+ return (__le32 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl));
+}
+
+static inline __le32 *get_dynamic_sa_inner_digest(struct dynamic_sa_ctl *cts)
+{
+ return (__le32 *) ((unsigned long)cts +
+ sizeof(struct dynamic_sa_ctl) +
+ cts->sa_contents.bf.key_size * 4);
+}
+
#endif
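To make the sa_contents-driven layout above concrete, here is a small sketch (hypothetical helper, assuming the structures introduced by this patch) of how a caller would locate and fill the state-record pointer slot that follows the variable-length SA fields:

static void example_set_state_ptr(struct dynamic_sa_ctl *sa, u32 sr_pa)
{
	/* offset = fixed control words + 4 bytes per field counted in sa_contents */
	u32 offset = get_dynamic_sa_offset_state_ptr_field(sa);

	*(u32 *)((u8 *)sa + offset) = sr_pa;
}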
diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
index 677ca17fd223..5e63742b0d22 100644
--- a/drivers/crypto/amcc/crypto4xx_trng.c
+++ b/drivers/crypto/amcc/crypto4xx_trng.c
@@ -92,7 +92,7 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
if (!rng)
goto err_out;
- rng->name = MODULE_NAME;
+ rng->name = KBUILD_MODNAME;
rng->data_present = ppc4xx_trng_data_present;
rng->data_read = ppc4xx_trng_data_read;
rng->priv = (unsigned long) dev;
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 29e20c37f3a6..691c6465b71e 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -36,6 +36,7 @@
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <linux/platform_data/crypto-atmel.h>
@@ -76,12 +77,11 @@
AES_FLAGS_ENCRYPT | \
AES_FLAGS_GTAGEN)
-#define AES_FLAGS_INIT BIT(2)
#define AES_FLAGS_BUSY BIT(3)
#define AES_FLAGS_DUMP_REG BIT(4)
#define AES_FLAGS_OWN_SHA BIT(5)
-#define AES_FLAGS_PERSISTENT (AES_FLAGS_INIT | AES_FLAGS_BUSY)
+#define AES_FLAGS_PERSISTENT AES_FLAGS_BUSY
#define ATMEL_AES_QUEUE_LENGTH 50
@@ -110,6 +110,7 @@ struct atmel_aes_base_ctx {
int keylen;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
u16 block_size;
+ bool is_aead;
};
struct atmel_aes_ctx {
@@ -156,6 +157,7 @@ struct atmel_aes_authenc_ctx {
struct atmel_aes_reqctx {
unsigned long mode;
+ u32 lastc[AES_BLOCK_SIZE / sizeof(u32)];
};
#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
@@ -448,11 +450,8 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
if (err)
return err;
- if (!(dd->flags & AES_FLAGS_INIT)) {
- atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
- atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
- dd->flags |= AES_FLAGS_INIT;
- }
+ atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
+ atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
return 0;
}
@@ -497,12 +496,34 @@ static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
- atmel_aes_authenc_complete(dd, err);
+ if (dd->ctx->is_aead)
+ atmel_aes_authenc_complete(dd, err);
#endif
clk_disable(dd->iclk);
dd->flags &= ~AES_FLAGS_BUSY;
+ if (!dd->ctx->is_aead) {
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_ablkcipher *ablkcipher =
+ crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ if (rctx->mode & AES_FLAGS_ENCRYPT) {
+ scatterwalk_map_and_copy(req->info, req->dst,
+ req->nbytes - ivsize, ivsize, 0);
+ } else {
+ if (req->src == req->dst) {
+ memcpy(req->info, rctx->lastc, ivsize);
+ } else {
+ scatterwalk_map_and_copy(req->info, req->src,
+ req->nbytes - ivsize, ivsize, 0);
+ }
+ }
+ }
+
if (dd->is_async)
dd->areq->complete(dd->areq, err);
@@ -1071,11 +1092,11 @@ static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
- struct atmel_aes_base_ctx *ctx;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct atmel_aes_reqctx *rctx;
struct atmel_aes_dev *dd;
- ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
switch (mode & AES_FLAGS_OPMODE_MASK) {
case AES_FLAGS_CFB8:
ctx->block_size = CFB8_BLOCK_SIZE;
@@ -1097,6 +1118,7 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
ctx->block_size = AES_BLOCK_SIZE;
break;
}
+ ctx->is_aead = false;
dd = atmel_aes_find_dev(ctx);
if (!dd)
@@ -1105,6 +1127,13 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
rctx = ablkcipher_request_ctx(req);
rctx->mode = mode;
+ if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ scatterwalk_map_and_copy(rctx->lastc, req->src,
+ (req->nbytes - ivsize), ivsize, 0);
+ }
+
return atmel_aes_handle_queue(dd, &req->base);
}
@@ -1236,10 +1265,6 @@ static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
-{
-}
-
static struct crypto_alg aes_algs[] = {
{
.cra_name = "ecb(aes)",
@@ -1252,7 +1277,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1272,7 +1296,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1293,7 +1316,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1314,7 +1336,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1335,7 +1356,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1356,7 +1376,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1377,7 +1396,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1398,7 +1416,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_ctr_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1421,7 +1438,6 @@ static struct crypto_alg aes_cfb64_alg = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1532,7 +1548,7 @@ static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
if (err)
return atmel_aes_complete(dd, err);
- if (likely(ivsize == 12)) {
+ if (likely(ivsize == GCM_AES_IV_SIZE)) {
memcpy(ctx->j0, iv, ivsize);
ctx->j0[3] = cpu_to_be32(1);
return atmel_aes_gcm_process(dd);
@@ -1739,6 +1755,7 @@ static int atmel_aes_gcm_crypt(struct aead_request *req,
ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
ctx->block_size = AES_BLOCK_SIZE;
+ ctx->is_aead = true;
dd = atmel_aes_find_dev(ctx);
if (!dd)
@@ -1808,19 +1825,13 @@ static int atmel_aes_gcm_init(struct crypto_aead *tfm)
return 0;
}
-static void atmel_aes_gcm_exit(struct crypto_aead *tfm)
-{
-
-}
-
static struct aead_alg aes_gcm_alg = {
.setkey = atmel_aes_gcm_setkey,
.setauthsize = atmel_aes_gcm_setauthsize,
.encrypt = atmel_aes_gcm_encrypt,
.decrypt = atmel_aes_gcm_decrypt,
.init = atmel_aes_gcm_init,
- .exit = atmel_aes_gcm_exit,
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
@@ -1955,7 +1966,6 @@ static struct crypto_alg aes_xts_alg = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_xts_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
@@ -2223,6 +2233,7 @@ static int atmel_aes_authenc_crypt(struct aead_request *req,
rctx->base.mode = mode;
ctx->block_size = AES_BLOCK_SIZE;
+ ctx->is_aead = true;
dd = atmel_aes_find_dev(ctx);
if (!dd)
@@ -2382,7 +2393,6 @@ static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
struct crypto_platform_data *pdata)
{
struct at_dma_slave *slave;
- int err = -ENOMEM;
dma_cap_mask_t mask;
dma_cap_zero(mask);
@@ -2407,7 +2417,7 @@ err_dma_out:
dma_release_channel(dd->src.chan);
err_dma_in:
dev_warn(dd->dev, "no DMA channel available\n");
- return err;
+ return -ENODEV;
}
static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
@@ -2658,8 +2668,6 @@ static int atmel_aes_probe(struct platform_device *pdev)
crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
- aes_dd->irq = -1;
-
/* Get the base address */
aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!aes_res) {
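On the atmel-aes IV handling above: ablkcipher users expect req->info to hold the next chaining IV when a request completes, and for in-place CBC-style decryption that block is overwritten by the plaintext, which is what the new rctx->lastc snapshot protects against. A hedged sketch of the idea (illustrative helper only, not the patch's code):

#include <crypto/scatterwalk.h>

static void example_save_next_iv(struct ablkcipher_request *req,
				 u32 *lastc, int ivsize)
{
	/* the last ciphertext block of req->src becomes the next chaining IV */
	scatterwalk_map_and_copy(lastc, req->src,
				 req->nbytes - ivsize, ivsize, 0);
}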
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 3e2f41b3eaf3..8874aa5ca0f7 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2628,7 +2628,6 @@ static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
struct crypto_platform_data *pdata)
{
- int err = -ENOMEM;
dma_cap_mask_t mask_in;
/* Try to grab DMA channel */
@@ -2639,7 +2638,7 @@ static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
if (!dd->dma_lch_in.chan) {
dev_warn(dd->dev, "no DMA channel available\n");
- return err;
+ return -ENODEV;
}
dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
@@ -2778,8 +2777,6 @@ static int atmel_sha_probe(struct platform_device *pdev)
crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
- sha_dd->irq = -1;
-
/* Get the base address */
sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!sha_res) {
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index f4b335dda568..592124f8382b 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -720,7 +720,6 @@ static bool atmel_tdes_filter(struct dma_chan *chan, void *slave)
static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
struct crypto_platform_data *pdata)
{
- int err = -ENOMEM;
dma_cap_mask_t mask;
dma_cap_zero(mask);
@@ -765,7 +764,7 @@ err_dma_out:
dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
dev_warn(dd->dev, "no DMA channel available\n");
- return err;
+ return -ENODEV;
}
static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
@@ -912,10 +911,6 @@ static int atmel_tdes_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static void atmel_tdes_cra_exit(struct crypto_tfm *tfm)
-{
-}
-
static struct crypto_alg tdes_algs[] = {
{
.cra_name = "ecb(des)",
@@ -928,7 +923,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -948,7 +942,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -969,7 +962,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -990,7 +982,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -1011,7 +1002,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -1032,7 +1022,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -1053,7 +1042,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -1074,7 +1062,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2 * DES_KEY_SIZE,
.max_keysize = 3 * DES_KEY_SIZE,
@@ -1094,7 +1081,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
@@ -1115,7 +1101,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2*DES_KEY_SIZE,
.max_keysize = 2*DES_KEY_SIZE,
@@ -1136,7 +1121,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2*DES_KEY_SIZE,
.max_keysize = 2*DES_KEY_SIZE,
@@ -1157,7 +1141,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2*DES_KEY_SIZE,
.max_keysize = 2*DES_KEY_SIZE,
@@ -1178,7 +1161,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2*DES_KEY_SIZE,
.max_keysize = 2*DES_KEY_SIZE,
@@ -1199,7 +1181,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
@@ -1382,8 +1363,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);
- tdes_dd->irq = -1;
-
/* Get the base address */
tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!tdes_res) {
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 0f9754e07719..0fb8bbf41a8d 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
@@ -1934,7 +1935,7 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
// The HW omits the initial increment of the counter field.
- crypto_inc(req_ctx->hw_ctx.J0+12, 4);
+ memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
@@ -2072,9 +2073,9 @@ static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
del_timer(&ac->timer);
}
-static void artpec6_crypto_timeout(unsigned long data)
+static void artpec6_crypto_timeout(struct timer_list *t)
{
- struct artpec6_crypto *ac = (struct artpec6_crypto *) data;
+ struct artpec6_crypto *ac = from_timer(ac, t, timer);
dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
@@ -2956,7 +2957,7 @@ static struct aead_alg aead_algos[] = {
.setkey = artpec6_crypto_aead_set_key,
.encrypt = artpec6_crypto_aead_encrypt,
.decrypt = artpec6_crypto_aead_decrypt,
- .ivsize = AES_BLOCK_SIZE,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
@@ -3041,9 +3042,6 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
variant = (enum artpec6_crypto_variant)match->data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -3063,7 +3061,7 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
spin_lock_init(&ac->queue_lock);
INIT_LIST_HEAD(&ac->queue);
INIT_LIST_HEAD(&ac->pending);
- setup_timer(&ac->timer, artpec6_crypto_timeout, (unsigned long) ac);
+ timer_setup(&ac->timer, artpec6_crypto_timeout, 0);
ac->base = base;
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 8685c7e4debd..2b75f95bbe1b 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -42,7 +42,6 @@
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
-#include <crypto/aes.h>
#include <crypto/sha3.h>
#include "util.h"
@@ -256,6 +255,44 @@ spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
return 0;
}
+static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
+ u8 chan_idx)
+{
+ int err;
+ int retry_cnt = 0;
+ struct device *dev = &(iproc_priv.pdev->dev);
+
+ err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
+ if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
+ while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
+ /*
+ * Mailbox queue is full. Since MAY_SLEEP is set, assume
+ * not in atomic context and we can wait and try again.
+ */
+ retry_cnt++;
+ usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
+ err = mbox_send_message(iproc_priv.mbox[chan_idx],
+ mssg);
+ atomic_inc(&iproc_priv.mb_no_spc);
+ }
+ }
+ if (err < 0) {
+ atomic_inc(&iproc_priv.mb_send_fail);
+ return err;
+ }
+
+ /* Check error returned by mailbox controller */
+ err = mssg->error;
+ if (unlikely(err < 0)) {
+ dev_err(dev, "message error %d", err);
+ /* Signal txdone for mailbox channel */
+ }
+
+ /* Signal txdone for mailbox channel */
+ mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
+ return err;
+}
+
/**
* handle_ablkcipher_req() - Submit as much of a block cipher request as fits in
* a single SPU request message, starting at the current position in the request
@@ -293,7 +330,6 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
u32 pad_len; /* total length of all padding */
bool update_key = false;
struct brcm_message *mssg; /* mailbox message */
- int retry_cnt = 0;
/* number of entries in src and dst sg in mailbox message. */
u8 rx_frag_num = 2; /* response header and STATUS */
@@ -462,24 +498,9 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
if (err)
return err;
- err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
- if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
- while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
- /*
- * Mailbox queue is full. Since MAY_SLEEP is set, assume
- * not in atomic context and we can wait and try again.
- */
- retry_cnt++;
- usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
- err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
- mssg);
- atomic_inc(&iproc_priv.mb_no_spc);
- }
- }
- if (unlikely(err < 0)) {
- atomic_inc(&iproc_priv.mb_send_fail);
+ err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+ if (unlikely(err < 0))
return err;
- }
return -EINPROGRESS;
}
@@ -710,7 +731,6 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
u32 spu_hdr_len;
unsigned int digestsize;
u16 rem = 0;
- int retry_cnt = 0;
/*
* number of entries in src and dst sg. Always includes SPU msg header.
@@ -904,24 +924,10 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
if (err)
return err;
- err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
- if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
- while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
- /*
- * Mailbox queue is full. Since MAY_SLEEP is set, assume
- * not in atomic context and we can wait and try again.
- */
- retry_cnt++;
- usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
- err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
- mssg);
- atomic_inc(&iproc_priv.mb_no_spc);
- }
- }
- if (err < 0) {
- atomic_inc(&iproc_priv.mb_send_fail);
+ err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+ if (unlikely(err < 0))
return err;
- }
+
return -EINPROGRESS;
}
@@ -1320,7 +1326,6 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
int assoc_nents = 0;
bool incl_icv = false;
unsigned int digestsize = ctx->digestsize;
- int retry_cnt = 0;
/* number of entries in src and dst sg. Always includes SPU msg header.
*/
@@ -1367,11 +1372,11 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
* expects AAD to include just SPI and seqno. So
* subtract off the IV len.
*/
- aead_parms.assoc_size -= GCM_ESP_IV_SIZE;
+ aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;
if (rctx->is_encrypt) {
aead_parms.return_iv = true;
- aead_parms.ret_iv_len = GCM_ESP_IV_SIZE;
+ aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
}
} else {
@@ -1558,24 +1563,9 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
if (err)
return err;
- err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
- if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
- while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
- /*
- * Mailbox queue is full. Since MAY_SLEEP is set, assume
- * not in atomic context and we can wait and try again.
- */
- retry_cnt++;
- usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
- err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
- mssg);
- atomic_inc(&iproc_priv.mb_no_spc);
- }
- }
- if (err < 0) {
- atomic_inc(&iproc_priv.mb_send_fail);
+ err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+ if (unlikely(err < 0))
return err;
- }
return -EINPROGRESS;
}
@@ -3255,7 +3245,7 @@ static struct iproc_alg_s driver_algs[] = {
.cra_flags = CRYPTO_ALG_NEED_FALLBACK
},
.setkey = aead_gcm_esp_setkey,
- .ivsize = GCM_ESP_IV_SIZE,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
.cipher_info = {
@@ -3301,7 +3291,7 @@ static struct iproc_alg_s driver_algs[] = {
.cra_flags = CRYPTO_ALG_NEED_FALLBACK
},
.setkey = rfc4543_gcm_esp_setkey,
- .ivsize = GCM_ESP_IV_SIZE,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
.cipher_info = {
@@ -4537,7 +4527,7 @@ static int spu_mb_init(struct device *dev)
mcl->dev = dev;
mcl->tx_block = false;
mcl->tx_tout = 0;
- mcl->knows_txdone = false;
+ mcl->knows_txdone = true;
mcl->rx_callback = spu_rx_callback;
mcl->tx_done = NULL;
@@ -4818,7 +4808,6 @@ static int spu_dt_read(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct spu_hw *spu = &iproc_priv.spu;
struct resource *spu_ctrl_regs;
- const struct of_device_id *match;
const struct spu_type_subtype *matched_spu_type;
struct device_node *dn = pdev->dev.of_node;
int err, i;
@@ -4826,14 +4815,12 @@ static int spu_dt_read(struct platform_device *pdev)
/* Count number of mailbox channels */
spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");
- match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev);
- if (!match) {
+ matched_spu_type = of_device_get_match_data(dev);
+ if (!matched_spu_type) {
dev_err(&pdev->dev, "Failed to match device\n");
return -ENODEV;
}
- matched_spu_type = match->data;
-
spu->spu_type = matched_spu_type->type;
spu->spu_subtype = matched_spu_type->subtype;
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 57a55eb2a255..763c425c41ca 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -23,6 +23,7 @@
#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/aead.h>
+#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/sha3.h>
@@ -39,8 +40,6 @@
#define ARC4_STATE_SIZE 4
#define CCM_AES_IV_SIZE 16
-#define GCM_AES_IV_SIZE 12
-#define GCM_ESP_IV_SIZE 8
#define CCM_ESP_IV_SIZE 8
#define RFC4543_ICV_SIZE 16
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index 430c5570ea87..d543c010ccd9 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -271,7 +271,7 @@ int do_shash(unsigned char *name, unsigned char *result,
hash = crypto_alloc_shash(name, 0, 0);
if (IS_ERR(hash)) {
rc = PTR_ERR(hash);
- pr_err("%s: Crypto %s allocation error %d", __func__, name, rc);
+ pr_err("%s: Crypto %s allocation error %d\n", __func__, name, rc);
return rc;
}
@@ -279,7 +279,7 @@ int do_shash(unsigned char *name, unsigned char *result,
sdesc = kmalloc(size, GFP_KERNEL);
if (!sdesc) {
rc = -ENOMEM;
- pr_err("%s: Memory allocation failure", __func__);
+ pr_err("%s: Memory allocation failure\n", __func__);
goto do_shash_err;
}
sdesc->shash.tfm = hash;
@@ -288,31 +288,31 @@ int do_shash(unsigned char *name, unsigned char *result,
if (key_len > 0) {
rc = crypto_shash_setkey(hash, key, key_len);
if (rc) {
- pr_err("%s: Could not setkey %s shash", __func__, name);
+ pr_err("%s: Could not setkey %s shash\n", __func__, name);
goto do_shash_err;
}
}
rc = crypto_shash_init(&sdesc->shash);
if (rc) {
- pr_err("%s: Could not init %s shash", __func__, name);
+ pr_err("%s: Could not init %s shash\n", __func__, name);
goto do_shash_err;
}
rc = crypto_shash_update(&sdesc->shash, data1, data1_len);
if (rc) {
- pr_err("%s: Could not update1", __func__);
+ pr_err("%s: Could not update1\n", __func__);
goto do_shash_err;
}
if (data2 && data2_len) {
rc = crypto_shash_update(&sdesc->shash, data2, data2_len);
if (rc) {
- pr_err("%s: Could not update2", __func__);
+ pr_err("%s: Could not update2\n", __func__);
goto do_shash_err;
}
}
rc = crypto_shash_final(&sdesc->shash, result);
if (rc)
- pr_err("%s: Could not generate %s hash", __func__, name);
+ pr_err("%s: Could not generate %s hash\n", __func__, name);
do_shash_err:
crypto_free_shash(hash);
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index a118b9bed669..bfbf8bf77f03 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -494,7 +494,8 @@ static struct ahash_alg algs = {
.cra_driver_name = DRIVER_NAME,
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx),
.cra_alignmask = 3,
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 54f3b375a453..2188235be02d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -108,6 +108,7 @@ struct caam_ctx {
dma_addr_t sh_desc_dec_dma;
dma_addr_t sh_desc_givenc_dma;
dma_addr_t key_dma;
+ enum dma_data_direction dir;
struct device *jrdev;
struct alginfo adata;
struct alginfo cdata;
@@ -118,6 +119,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
ctx->adata.keylen_pad;
@@ -136,9 +138,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
+ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -154,9 +157,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
/* aead_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
+ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -168,6 +172,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 ctx1_iv_off = 0;
u32 *desc, *nonce = NULL;
u32 inl_mask;
@@ -234,9 +239,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
- false);
+ false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
skip_enc:
/*
@@ -266,9 +271,9 @@ skip_enc:
desc = ctx->sh_desc_dec;
cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, alg->caam.geniv, is_rfc3686,
- nonce, ctx1_iv_off, false);
+ nonce, ctx1_iv_off, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
if (!alg->caam.geniv)
goto skip_givenc;
@@ -300,9 +305,9 @@ skip_enc:
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, false);
+ ctx1_iv_off, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
skip_givenc:
return 0;
@@ -346,7 +351,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -363,7 +368,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -405,7 +410,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -422,7 +427,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -465,7 +470,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -482,7 +487,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -503,6 +508,7 @@ static int aead_setkey(struct crypto_aead *aead,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
int ret = 0;
@@ -517,6 +523,27 @@ static int aead_setkey(struct crypto_aead *aead,
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+ keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
+ }
+
ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
keys.authkeylen, CAAM_MAX_KEY_SIZE -
keys.enckeylen);
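From SEC Era 6 the split (IPAD/OPAD) key no longer needs a separate gen_split_key() job: the plain authentication key is copied to the start of ctx->key, the encryption key is appended at keylen_pad, and a DKP OPERATION in the shared descriptor derives the split key, overwriting either the key buffer (pointer form) or the copy embedded in the descriptor (immediate form). A rough user-space sketch of the buffer layout and the bound check above; CAAM_MAX_KEY_SIZE here is a stand-in, not the driver's actual limit:

    /* Sketch: ctx->key layout for the DKP path. */
    #include <stdint.h>
    #include <string.h>

    #define CAAM_MAX_KEY_SIZE 128   /* stand-in value */

    struct key_buf {
        uint8_t data[CAAM_MAX_KEY_SIZE];
    };

    /* Returns bytes used, or -1 if the keys do not fit. */
    static int build_dkp_key(struct key_buf *buf,
                             const uint8_t *authkey, size_t authkeylen,
                             size_t keylen_pad,          /* split key pad length */
                             const uint8_t *enckey, size_t enckeylen)
    {
        if (authkeylen > keylen_pad ||
            keylen_pad + enckeylen > sizeof(buf->data))
            return -1;
        /* [0 .. keylen_pad): auth key, later replaced by the derived split key */
        memcpy(buf->data, authkey, authkeylen);
        /* [keylen_pad .. keylen_pad + enckeylen): cipher key */
        memcpy(buf->data + keylen_pad, enckey, enckeylen);
        return (int)(keylen_pad + enckeylen);
    }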
@@ -527,12 +554,14 @@ static int aead_setkey(struct crypto_aead *aead,
/* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
- keys.enckeylen, DMA_TO_DEVICE);
+ keys.enckeylen, ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
+
+skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
return aead_set_sh_desc(aead);
badkey:
@@ -552,7 +581,7 @@ static int gcm_setkey(struct crypto_aead *aead,
#endif
memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
ctx->cdata.keylen = keylen;
return gcm_set_sh_desc(aead);
@@ -580,7 +609,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
*/
ctx->cdata.keylen = keylen - 4;
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- DMA_TO_DEVICE);
+ ctx->dir);
return rfc4106_set_sh_desc(aead);
}
@@ -606,7 +635,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
*/
ctx->cdata.keylen = keylen - 4;
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- DMA_TO_DEVICE);
+ ctx->dir);
return rfc4543_set_sh_desc(aead);
}
@@ -625,7 +654,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const bool is_rfc3686 = (ctr_mode &&
(strstr(alg_name, "rfc3686") != NULL));
- memcpy(ctx->key, key, keylen);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -648,9 +676,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* ablkcipher_encrypt shared descriptor */
@@ -658,21 +685,21 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/* ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/* ablkcipher_givencrypt shared descriptor */
desc = ctx->sh_desc_givenc;
cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -691,23 +718,21 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -EINVAL;
}
- memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* xts_ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/* xts_ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -979,9 +1004,6 @@ static void init_aead_job(struct aead_request *req,
append_seq_out_ptr(desc, dst_dma,
req->assoclen + req->cryptlen - authsize,
out_options);
-
- /* REG3 = assoclen */
- append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
}
static void init_gcm_job(struct aead_request *req,
@@ -992,10 +1014,11 @@ static void init_gcm_job(struct aead_request *req,
struct caam_ctx *ctx = crypto_aead_ctx(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc = edesc->hw_desc;
- bool generic_gcm = (ivsize == 12);
+ bool generic_gcm = (ivsize == GCM_AES_IV_SIZE);
unsigned int last;
init_aead_job(req, edesc, all_contig, encrypt);
+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
/* BUG This should not be specific to generic GCM. */
last = 0;
@@ -1004,7 +1027,7 @@ static void init_gcm_job(struct aead_request *req,
/* Read GCM IV */
append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last);
/* Append Salt */
if (!generic_gcm)
append_data(desc, ctx->key + ctx->cdata.keylen, 4);
@@ -1022,6 +1045,7 @@ static void init_authenc_job(struct aead_request *req,
struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
@@ -1045,6 +1069,15 @@ static void init_authenc_job(struct aead_request *req,
init_aead_job(req, edesc, all_contig, encrypt);
+ /*
+ * {REG3, DPOVRD} = assoclen, depending on whether the MATH command
+ * supports having DPOVRD as destination.
+ */
+ if (ctrlpriv->era < 3)
+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
+ else
+ append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
+
if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
append_load_as_imm(desc, req->iv, ivsize,
LDST_CLASS_1_CCB |

@@ -1953,7 +1986,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setauthsize = rfc4106_setauthsize,
.encrypt = ipsec_gcm_encrypt,
.decrypt = ipsec_gcm_decrypt,
- .ivsize = 8,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
.caam = {
@@ -1971,7 +2004,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setauthsize = rfc4543_setauthsize,
.encrypt = ipsec_gcm_encrypt,
.decrypt = ipsec_gcm_decrypt,
- .ivsize = 8,
+ .ivsize = GCM_RFC4543_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
.caam = {
@@ -1990,7 +2023,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setauthsize = gcm_setauthsize,
.encrypt = gcm_encrypt,
.decrypt = gcm_decrypt,
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
.caam = {
@@ -3228,9 +3261,11 @@ struct caam_crypto_alg {
struct caam_alg_entry caam;
};
-static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+ bool uses_dkp)
{
dma_addr_t dma_addr;
+ struct caam_drv_private *priv;
ctx->jrdev = caam_jr_alloc();
if (IS_ERR(ctx->jrdev)) {
@@ -3238,10 +3273,16 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
return PTR_ERR(ctx->jrdev);
}
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ if (priv->era >= 6 && uses_dkp)
+ ctx->dir = DMA_BIDIRECTIONAL;
+ else
+ ctx->dir = DMA_TO_DEVICE;
+
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
offsetof(struct caam_ctx,
sh_desc_enc_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
caam_jr_free(ctx->jrdev);
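The new ctx->dir exists because DKP makes the device write back into memory the driver used to treat as write-only from the CPU side: the derived split key lands either in ctx->key or in the key immediate inside the shared descriptor, both of which sit in the region mapped here. Keeping that mapping DMA_TO_DEVICE would leave the CPU with a stale view, so it becomes DMA_BIDIRECTIONAL whenever Era >= 6 and the algorithm actually uses DKP (those whose setkey is aead_setkey, as passed from caam_aead_init() below). A compact sketch of the decision; the enum values mirror <linux/dma-direction.h>:

    /* Sketch: DMA direction for the shared-descriptor/key region. */
    enum dma_data_direction {
        DMA_BIDIRECTIONAL = 0,
        DMA_TO_DEVICE     = 1,
        DMA_FROM_DEVICE   = 2,
    };

    static enum dma_data_direction ctx_dma_dir(int era, int uses_dkp)
    {
        /* DKP (Era >= 6) has the CAAM write the derived key back. */
        return (era >= 6 && uses_dkp) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
    }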
@@ -3269,7 +3310,7 @@ static int caam_cra_init(struct crypto_tfm *tfm)
container_of(alg, struct caam_crypto_alg, crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam, false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -3279,14 +3320,15 @@ static int caam_aead_init(struct crypto_aead *tfm)
container_of(alg, struct caam_aead_alg, aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam,
+ alg->setkey == aead_setkey);
}
static void caam_exit_common(struct caam_ctx *ctx)
{
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
offsetof(struct caam_ctx, sh_desc_enc_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 530c14ee32de..ceb93fbb76e6 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *desc, u32 type)
* cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
* (non-protocol) with no (null) encryption.
* @desc: pointer to buffer used for descriptor construction
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @icvsize: integrity check value (ICV) size (truncated or full)
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize)
+ unsigned int icvsize, int era)
{
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
@@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- if (adata->key_inline)
- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
- KEY_ENC);
- else
- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6) {
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ } else {
+ append_proto_dkp(desc, adata);
+ }
set_jump_tgt_here(desc, key_jump_cmd);
/* assoclen + cryptlen = seqinlen */
@@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
* cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
* (non-protocol) with no (null) decryption.
* @desc: pointer to buffer used for descriptor construction
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @icvsize: integrity check value (ICV) size (truncated or full)
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize)
+ unsigned int icvsize, int era)
{
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
@@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- if (adata->key_inline)
- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6) {
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ } else {
+ append_proto_dkp(desc, adata);
+ }
set_jump_tgt_here(desc, key_jump_cmd);
/* Class 2 operation */
@@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
static void init_sh_desc_key_aead(u32 * const desc,
struct alginfo * const cdata,
struct alginfo * const adata,
- const bool is_rfc3686, u32 *nonce)
+ const bool is_rfc3686, u32 *nonce, int era)
{
u32 *key_jump_cmd;
unsigned int enckeylen = cdata->keylen;
@@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 * const desc,
if (is_rfc3686)
enckeylen -= CTR_RFC3686_NONCE_SIZE;
- if (adata->key_inline)
- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6) {
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ } else {
+ append_proto_dkp(desc, adata);
+ }
if (cdata->key_inline)
append_key_as_imm(desc, cdata->key_virt, enckeylen,
@@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 * const desc,
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
- u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
+ u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
+ int era)
{
/* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
/* Class 2 operation */
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
@@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
}
/* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (is_qi || era < 3) {
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ } else {
+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ }
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
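The assoclen handed to the shared descriptor used to travel through MATH register REG3; from Era 3 onward DPOVRD can be a MATH destination, so the job-ring path now passes it via DPOVRD and only pre-Era-3 hardware and the caam/qi descriptors keep the REG3 convention, which is exactly the is_qi || era < 3 split above. A small sketch of that dispatch:

    /* Sketch: which register carries assoclen into the shared descriptor. */
    #include <stdbool.h>

    enum assoclen_src { SRC_REG3, SRC_DPOVRD };

    static enum assoclen_src assoclen_source(int era, bool is_qi)
    {
        /* DPOVRD as a MATH destination exists from SEC Era 3; the caam/qi
         * frontend keeps the older REG3 convention. */
        return (is_qi || era < 3) ? SRC_REG3 : SRC_DPOVRD;
    }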
@@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool geniv,
const bool is_rfc3686, u32 *nonce,
- const u32 ctx1_iv_off, const bool is_qi)
+ const u32 ctx1_iv_off, const bool is_qi, int era)
{
/* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
/* Class 2 operation */
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
@@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
}
/* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- if (geniv)
- append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
- else
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (is_qi || era < 3) {
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
+ ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
+ CAAM_CMD_SZ);
+ } else {
+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ if (geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
+ ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
+ CAAM_CMD_SZ);
+ }
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -456,29 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off,
- const bool is_qi)
+ const bool is_qi, int era)
{
u32 geniv, moveiv;
/* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
if (is_qi) {
u32 *wait_load_cmd;
@@ -528,8 +561,13 @@ copy_iv:
OP_ALG_ENCRYPT);
/* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (is_qi || era < 3) {
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ } else {
+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ }
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -1075,7 +1113,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
- u8 *nonce = cdata->key_virt + cdata->keylen;
+ const u8 *nonce = cdata->key_virt + cdata->keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
@@ -1140,7 +1178,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
- u8 *nonce = cdata->key_virt + cdata->keylen;
+ const u8 *nonce = cdata->key_virt + cdata->keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
@@ -1209,7 +1247,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
/* Load Nonce into CONTEXT1 reg */
if (is_rfc3686) {
- u8 *nonce = cdata->key_virt + cdata->keylen;
+ const u8 *nonce = cdata->key_virt + cdata->keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index e412ec8f7005..5f9445ae2114 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -43,28 +43,28 @@
15 * CAAM_CMD_SZ)
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize);
+ unsigned int icvsize, int era);
void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize);
+ unsigned int icvsize, int era);
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off,
- const bool is_qi);
+ const bool is_qi, int era);
void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool geniv,
const bool is_rfc3686, u32 *nonce,
- const u32 ctx1_iv_off, const bool is_qi);
+ const u32 ctx1_iv_off, const bool is_qi, int era);
void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off,
- const bool is_qi);
+ const bool is_qi, int era);
void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
unsigned int icvsize);
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 2eefc4a26bc2..4aecc9435f69 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -7,7 +7,7 @@
*/
#include "compat.h"
-
+#include "ctrl.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
@@ -53,6 +53,7 @@ struct caam_ctx {
u32 sh_desc_givenc[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t key_dma;
+ enum dma_data_direction dir;
struct alginfo adata;
struct alginfo cdata;
unsigned int authsize;
@@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
@@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, true);
+ ctx1_iv_off, true, ctrlpriv->era);
skip_enc:
/* aead_decrypt shared descriptor */
@@ -149,7 +151,8 @@ skip_enc:
cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, alg->caam.geniv,
- is_rfc3686, nonce, ctx1_iv_off, true);
+ is_rfc3686, nonce, ctx1_iv_off, true,
+ ctrlpriv->era);
if (!alg->caam.geniv)
goto skip_givenc;
@@ -176,7 +179,7 @@ skip_enc:
cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, true);
+ ctx1_iv_off, true, ctrlpriv->era);
skip_givenc:
return 0;
@@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
int ret = 0;
@@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+ keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
+ }
+
ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
keys.authkeylen, CAAM_MAX_KEY_SIZE -
keys.enckeylen);
@@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
/* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
- keys.enckeylen, DMA_TO_DEVICE);
+ keys.enckeylen, ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
+skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
ret = aead_set_sh_desc(aead);
@@ -272,7 +298,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
int ret = 0;
- memcpy(ctx->key, key, keylen);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -295,9 +320,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
@@ -356,10 +380,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -EINVAL;
}
- memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* xts ablkcipher encrypt, decrypt shared descriptors */
@@ -668,7 +690,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
(mapped_dst_nents > 1 ? mapped_dst_nents : 0);
if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
iv_dma, ivsize, op_type, 0, 0);
@@ -905,7 +927,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
iv_dma, ivsize, op_type, 0, 0);
@@ -1058,7 +1080,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
}
if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
iv_dma, ivsize, GIVENCRYPT, 0, 0);
@@ -2123,7 +2145,8 @@ struct caam_crypto_alg {
struct caam_alg_entry caam;
};
-static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+ bool uses_dkp)
{
struct caam_drv_private *priv;
@@ -2137,8 +2160,14 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
return PTR_ERR(ctx->jrdev);
}
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ if (priv->era >= 6 && uses_dkp)
+ ctx->dir = DMA_BIDIRECTIONAL;
+ else
+ ctx->dir = DMA_TO_DEVICE;
+
ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
- DMA_TO_DEVICE);
+ ctx->dir);
if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
dev_err(ctx->jrdev, "unable to map key\n");
caam_jr_free(ctx->jrdev);
@@ -2149,7 +2178,6 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
- priv = dev_get_drvdata(ctx->jrdev->parent);
ctx->qidev = priv->qidev;
spin_lock_init(&ctx->lock);
@@ -2167,7 +2195,7 @@ static int caam_cra_init(struct crypto_tfm *tfm)
crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam, false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -2177,7 +2205,8 @@ static int caam_aead_init(struct crypto_aead *tfm)
aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam,
+ alg->setkey == aead_setkey);
}
static void caam_exit_common(struct caam_ctx *ctx)
@@ -2186,8 +2215,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
- dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
- DMA_TO_DEVICE);
+ dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
caam_jr_free(ctx->jrdev);
}
@@ -2312,6 +2340,11 @@ static int __init caam_qi_algapi_init(void)
if (!priv || !priv->qi_present)
return -ENODEV;
+ if (caam_dpaa2) {
+ dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
+ return -ENODEV;
+ }
+
INIT_LIST_HEAD(&alg_list);
/*
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 698580b60b2f..0beb28196e20 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -107,6 +107,7 @@ struct caam_hash_ctx {
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
+ enum dma_data_direction dir;
struct device *jrdev;
u8 key[CAAM_MAX_HASH_KEY_SIZE];
int ctx_len;
@@ -218,7 +219,7 @@ static inline int buf_map_to_sec4_sg(struct device *jrdev,
}
/* Map state->caam_ctx, and add it to link table */
-static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
+static inline int ctx_map_to_sec4_sg(struct device *jrdev,
struct caam_hash_state *state, int ctx_len,
struct sec4_sg_entry *sec4_sg, u32 flag)
{
@@ -241,7 +242,8 @@ static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
* read and write to seqout
*/
static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
- struct caam_hash_ctx *ctx, bool import_ctx)
+ struct caam_hash_ctx *ctx, bool import_ctx,
+ int era)
{
u32 op = ctx->adata.algtype;
u32 *skip_key_load;
@@ -254,9 +256,12 @@ static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
- ctx->adata.keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6)
+ append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
+ ctx->adata.keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_proto_dkp(desc, &ctx->adata);
set_jump_tgt_here(desc, skip_key_load);
@@ -289,13 +294,17 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
+ ctx->adata.key_virt = ctx->key;
+
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
- ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update shdesc@"__stringify(__LINE__)": ",
@@ -304,9 +313,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update first shdesc@"__stringify(__LINE__)": ",
@@ -315,9 +325,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
- ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
@@ -326,9 +337,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash digest shdesc@"__stringify(__LINE__)": ",
@@ -421,6 +433,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
int ret;
u8 *hashed_key = NULL;
@@ -441,16 +454,26 @@ static int ahash_setkey(struct crypto_ahash *ahash,
key = hashed_key;
}
- ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
- CAAM_MAX_HASH_KEY_SIZE);
- if (ret)
- goto bad_free_key;
+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.key_inline = true;
+ ctx->adata.keylen = keylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->adata.keylen_pad, 1);
-#endif
+ if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
+ goto bad_free_key;
+
+ memcpy(ctx->key, key, keylen);
+ } else {
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
+ keylen, CAAM_MAX_HASH_KEY_SIZE);
+ if (ret)
+ goto bad_free_key;
+ }
kfree(hashed_key);
return ahash_set_sh_desc(ahash);
@@ -773,7 +796,7 @@ static int ahash_update_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
@@ -871,9 +894,8 @@ static int ahash_final_ctx(struct ahash_request *req)
desc = edesc->hw_desc;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->src_nents = 0;
- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
goto unmap_ctx;
@@ -967,7 +989,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
goto unmap_ctx;
@@ -1123,7 +1145,6 @@ static int ahash_final_no_ctx(struct ahash_request *req)
dev_err(jrdev, "unable to map dst\n");
goto unmap;
}
- edesc->src_nents = 0;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1205,7 +1226,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->dst_dma = 0;
ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
if (ret)
@@ -1417,7 +1437,6 @@ static int ahash_update_first(struct ahash_request *req)
}
edesc->src_nents = src_nents;
- edesc->dst_dma = 0;
ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
to_hash);
@@ -1719,6 +1738,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
dma_addr_t dma_addr;
+ struct caam_drv_private *priv;
/*
* Get a Job ring from Job Ring driver to ensure in-order
@@ -1730,10 +1750,13 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
return PTR_ERR(ctx->jrdev);
}
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
offsetof(struct caam_hash_ctx,
sh_desc_update_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map shared descriptors\n");
caam_jr_free(ctx->jrdev);
@@ -1768,7 +1791,7 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
offsetof(struct caam_hash_ctx,
sh_desc_update_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 5f2f1b288d37..1c71e0cd5098 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -32,6 +32,7 @@
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/des.h>
+#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 027e121c6f70..e843cf410373 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -228,12 +228,16 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
* without any error (HW optimizations for later
* CAAM eras), then try again.
*/
+ if (ret)
+ break;
+
rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
- !(rdsta_val & (1 << sh_idx)))
+ !(rdsta_val & (1 << sh_idx))) {
ret = -EAGAIN;
- if (ret)
break;
+ }
+
dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
/* Clear the contents before recreating the descriptor */
memset(desc, 0x00, CAAM_CMD_SZ * 7);
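The reordering in instantiate_rng() tightens the error handling: a failed run_descriptor now breaks out immediately with its own error instead of falling through to the RDSTA check, where the old code could overwrite it with -EAGAIN; -EAGAIN is set only when the descriptor ran cleanly but the state handle still is not instantiated, which is the case the caller is meant to retry. A toy model of that contract (user-space sketch, not the driver code):

    /* Sketch: retry contract for RNG state-handle instantiation. */
    #include <errno.h>

    static int check_handle(int run_status, int handle_instantiated)
    {
        if (run_status)            /* descriptor failed outright: report it */
            return run_status;
        if (!handle_instantiated)  /* ran fine, handle still missing: retry */
            return -EAGAIN;
        return 0;                  /* instantiated */
    }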
@@ -611,6 +615,8 @@ static int caam_probe(struct platform_device *pdev)
goto iounmap_ctrl;
}
+ ctrlpriv->era = caam_get_era();
+
ret = of_platform_populate(nprop, caam_match, NULL, dev);
if (ret) {
dev_err(dev, "JR platform devices creation error\n");
@@ -742,7 +748,7 @@ static int caam_probe(struct platform_device *pdev)
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
- caam_get_era());
+ ctrlpriv->era);
dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
ctrlpriv->total_jobrs, ctrlpriv->qi_present,
caam_dpaa2 ? "yes" : "no");
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 6633fbb80e74..f76ff160a02c 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -444,6 +444,18 @@
#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
#define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
#define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
@@ -1093,6 +1105,22 @@
/* MacSec protinfos */
#define OP_PCL_MACSEC 0x0001
+/* Derived Key Protocol (DKP) Protinfo */
+#define OP_PCL_DKP_SRC_SHIFT 14
+#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_DST_SHIFT 12
+#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_KEY_SHIFT 0
+#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
+
/* PKI unidirectional protocol protinfo bits */
#define OP_PCL_PKPROT_TEST 0x0008
#define OP_PCL_PKPROT_DECRYPT 0x0004
@@ -1440,7 +1468,7 @@
#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
+#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
@@ -1452,6 +1480,7 @@
#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
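Two small fixes in the MATH field definitions: the old MATH_SRC1_DPOVRD was shifted with MATH_SRC0_SHIFT, so it encoded DPOVRD into the wrong source field, and MATH_DEST_DPOVRD is newly added, which is what lets the Era >= 3 descriptors above use DPOVRD as a MATH destination. An illustration of the encoding difference, with the field offsets assumed (SRC0 at bit 16, SRC1 at bit 12):

    /* Sketch: where the DPOVRD selector lands in the MATH command word. */
    #include <stdint.h>
    #include <stdio.h>

    #define SRC0_SHIFT 16   /* assumed field offsets */
    #define SRC1_SHIFT 12

    int main(void)
    {
        uint32_t old_def = 0x07u << SRC0_SHIFT;  /* landed in the SRC0 field */
        uint32_t new_def = 0x07u << SRC1_SHIFT;  /* correct SRC1 encoding */

        printf("old 0x%08x vs fixed 0x%08x\n", old_def, new_def);
        return 0;
    }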
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index ba1ca0806f0a..d4256fa4a1d6 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -109,7 +109,7 @@ static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
append_ptr(desc, ptr);
}
-static inline void append_data(u32 * const desc, void *data, int len)
+static inline void append_data(u32 * const desc, const void *data, int len)
{
u32 *offset = desc_end(desc);
@@ -172,7 +172,7 @@ static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
append_cmd(desc, len);
}
-static inline void append_cmd_data(u32 * const desc, void *data, int len,
+static inline void append_cmd_data(u32 * const desc, const void *data, int len,
u32 command)
{
append_cmd(desc, command | IMMEDIATE | len);
@@ -271,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
APPEND_SEQ_PTR_INTLEN(out, OUT)
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
@@ -312,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
* from length of immediate data provided, e.g., split keys
*/
#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
unsigned int data_len, \
unsigned int len, u32 options) \
{ \
@@ -452,7 +452,7 @@ struct alginfo {
unsigned int keylen_pad;
union {
dma_addr_t key_dma;
- void *key_virt;
+ const void *key_virt;
};
bool key_inline;
};
@@ -496,4 +496,45 @@ static inline int desc_inline_query(unsigned int sd_base_len,
return (rem_bytes >= 0) ? 0 : -1;
}
+/**
+ * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ * keylen should be the length of initial key, while keylen_pad
+ * the length of the derived (split) key.
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
+ * SHA256, SHA384, SHA512}.
+ */
+static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
+{
+ u32 protid;
+
+ /*
+ * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
+ * to OP_PCLID_DKP_{MD5, SHA*}
+ */
+ protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
+ (0x20 << OP_ALG_ALGSEL_SHIFT);
+
+ if (adata->key_inline) {
+ int words;
+
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+ OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
+ adata->keylen);
+ append_data(desc, adata->key_virt, adata->keylen);
+
+ /* Reserve space in descriptor buffer for the derived key */
+ words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
+ ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
+ if (words)
+ (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
+ } else {
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+ OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
+ adata->keylen);
+ append_ptr(desc, adata->key_dma);
+ }
+}
+
#endif /* DESC_CONSTR_H */
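append_proto_dkp() turns the hash's ALGSEL code into the matching DKP protocol ID: the OP_PCLID_DKP_* values added to desc.h are laid out as 0x20 plus the low nibble of the ALGSEL code, and the ALGSEL and PCLID fields occupy the same bit positions, which is what makes the "quick & dirty" OR valid. A user-space check of the mapping; the SHIFT and SUBMASK values are assumed to match desc.h:

    /* Sketch: OP_ALG_ALGSEL_* -> OP_PCLID_DKP_* translation. */
    #include <stdint.h>
    #include <stdio.h>

    #define ALGSEL_SHIFT     16                        /* assumed, same as PCLID */
    #define ALGSEL_SUBMASK   (0x0f << ALGSEL_SHIFT)
    #define ALGSEL_SHA256    (0x43 << ALGSEL_SHIFT)
    #define PCLID_DKP_SHA256 (0x23 << ALGSEL_SHIFT)    /* from the desc.h hunk */

    int main(void)
    {
        uint32_t protid = (ALGSEL_SHA256 & ALGSEL_SUBMASK) |
                          (0x20 << ALGSEL_SHIFT);

        printf("sha256 -> %s\n",
               protid == PCLID_DKP_SHA256 ? "DKP_SHA256" : "mismatch");
        return 0;
    }

In the immediate form the derived key is written back over the descriptor's own immediate data and is keylen_pad rather than keylen bytes long, which is why the header length is widened by the difference in words before the descriptor is used.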
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 91f1107276e5..7696a774a362 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -84,6 +84,7 @@ struct caam_drv_private {
u8 qi_present; /* Nonzero if QI present in device */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
+ int era; /* CAAM Era (internal HW revision) */
#define RNG4_MAX_HANDLES 2
/* RNG4 block */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index d258953ff488..f4f258075b89 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -172,7 +172,7 @@ static void caam_jr_dequeue(unsigned long devarg)
while (rd_reg32(&jrp->rregs->outring_used)) {
- head = ACCESS_ONCE(jrp->head);
+ head = READ_ONCE(jrp->head);
spin_lock(&jrp->outlock);
@@ -341,7 +341,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
spin_lock_bh(&jrp->inplock);
head = jrp->head;
- tail = ACCESS_ONCE(jrp->tail);
+ tail = READ_ONCE(jrp->tail);
if (!rd_reg32(&jrp->rregs->inpring_avail) ||
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 8c79c3a153dc..312b5f042f31 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -11,36 +11,6 @@
#include "desc_constr.h"
#include "key_gen.h"
-/**
- * split_key_len - Compute MDHA split key length for a given algorithm
- * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
- * SHA224, SHA384, SHA512.
- *
- * Return: MDHA split key length
- */
-static inline u32 split_key_len(u32 hash)
-{
- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
- u32 idx;
-
- idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
-
- return (u32)(mdpadlen[idx] * 2);
-}
-
-/**
- * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
- * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
- * SHA224, SHA384, SHA512.
- *
- * Return: MDHA split key pad length
- */
-static inline u32 split_key_pad_len(u32 hash)
-{
- return ALIGN(split_key_len(hash), 16);
-}
-
void split_key_done(struct device *dev, u32 *desc, u32 err,
void *context)
{
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
index 5db055c25bd2..818f78f6fc1a 100644
--- a/drivers/crypto/caam/key_gen.h
+++ b/drivers/crypto/caam/key_gen.h
@@ -6,6 +6,36 @@
*
*/
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
+ */
+static inline u32 split_key_len(u32 hash)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ u32 idx;
+
+ idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+ return (u32)(mdpadlen[idx] * 2);
+}
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline u32 split_key_pad_len(u32 hash)
+{
+ return ALIGN(split_key_len(hash), 16);
+}
+
struct split_key_result {
struct completion completion;
int err;
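split_key_len() and split_key_pad_len() move into the header because the DKP setkey paths now need them outside key_gen.c. The split key is the precomputed HMAC IPAD||OPAD state, i.e. twice the MDHA pad size, rounded up to 16 bytes. A worked example of the resulting sizes, using the mdpadlen[] values above:

    /* Worked example: MDHA split key sizes per hash. */
    #include <stdio.h>

    int main(void)
    {
        static const unsigned char mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
        static const char *names[] = { "md5", "sha1", "sha224",
                                       "sha256", "sha384", "sha512" };

        for (int i = 0; i < 6; i++) {
            unsigned int split = mdpadlen[i] * 2u;
            unsigned int pad = (split + 15u) & ~15u;   /* ALIGN(split, 16) */

            printf("%-6s  split %3u bytes, padded %3u\n", names[i], split, pad);
        }
        return 0;
    }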
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index 169e66231bcf..b0ba4331944b 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -459,7 +459,8 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL);
if (unlikely(!info->completion_addr)) {
dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto request_cleanup;
}
result = (union cpt_res_s *)info->completion_addr;
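Returning -ENOMEM directly at this point leaked the request state that process_request() had already allocated and mapped; the fix routes the failure through the same request_cleanup label used by the later error paths. The general shape of that unwind pattern, as a generic sketch rather than the cptvf code:

    /* Sketch: single-exit cleanup on a mid-function allocation failure. */
    #include <stdlib.h>

    static int do_work(void)
    {
        int ret = 0;
        void *first = malloc(32), *second = NULL;

        if (!first)
            return -1;              /* nothing to unwind yet */

        second = malloc(32);
        if (!second) {
            ret = -1;
            goto cleanup;           /* free everything taken so far */
        }

        /* ... use first and second ... */

    cleanup:
        free(second);
        free(first);
        return ret;
    }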
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c
index bf9900bc4804..ab4ccf2f9e77 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -127,7 +127,7 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
* size and interrupt threshold.
*/
offset = NPS_PKT_IN_INSTR_BADDRX(i);
- nitrox_write_csr(ndev, NPS_PKT_IN_INSTR_BADDRX(i), cmdq->dma);
+ nitrox_write_csr(ndev, offset, cmdq->dma);
/* configure ring size */
offset = NPS_PKT_IN_INSTR_RSIZEX(i);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 4addc238a6ef..deaefd532aaa 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -6,7 +6,6 @@
#include "nitrox_dev.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"
-#include "nitrox_req.h"
/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 6d626606b9c5..b9dfae47aefd 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -1,5 +1,6 @@
config CRYPTO_DEV_CCP_DD
tristate "Secure Processor device driver"
+ depends on CPU_SUP_AMD || ARM64
default m
help
Provides AMD Secure Processor device driver.
@@ -32,3 +33,14 @@ config CRYPTO_DEV_CCP_CRYPTO
Support for using the cryptographic API with the AMD Cryptographic
Coprocessor. This module supports offload of SHA and AES algorithms.
If you choose 'M' here, this module will be called ccp_crypto.
+
+config CRYPTO_DEV_SP_PSP
+ bool "Platform Security Processor (PSP) device"
+ default y
+ depends on CRYPTO_DEV_CCP_DD && X86_64
+ help
+ Provide support for the AMD Platform Security Processor (PSP).
+ The PSP is a dedicated processor that provides support for key
+ management commands in Secure Encrypted Virtualization (SEV) mode,
+ along with software-based Trusted Execution Environment (TEE) to
+ enable third-party trusted applications.
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index c4ce726b931e..51d1c0cf66c7 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -8,6 +8,7 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \
ccp-dmaengine.o \
ccp-debugfs.o
ccp-$(CONFIG_PCI) += sp-pci.o
+ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index 52313524a4dd..ca1f0d780b61 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -19,13 +19,11 @@
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
+#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
-#include <linux/delay.h>
#include "ccp-crypto.h"
-#define AES_GCM_IVSIZE 12
-
static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret)
{
return ret;
@@ -95,9 +93,9 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
*/
/* Prepare the IV: 12 bytes + an integer (counter) */
- memcpy(rctx->iv, req->iv, AES_GCM_IVSIZE);
+ memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
for (i = 0; i < 3; i++)
- rctx->iv[i + AES_GCM_IVSIZE] = 0;
+ rctx->iv[i + GCM_AES_IV_SIZE] = 0;
rctx->iv[AES_BLOCK_SIZE - 1] = 1;
/* Set up a scatterlist for the IV */
@@ -160,7 +158,7 @@ static struct aead_alg ccp_aes_gcm_defaults = {
.encrypt = ccp_aes_gcm_encrypt,
.decrypt = ccp_aes_gcm_decrypt,
.init = ccp_aes_gcm_cra_init,
- .ivsize = AES_GCM_IVSIZE,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
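The IV handling in the hunk above follows the standard GCM construction for 96-bit nonces; a sketch of what rctx->iv holds after this setup (assuming GCM_AES_IV_SIZE == 12 and AES_BLOCK_SIZE == 16):

	iv[0..11]  = req->iv   /* 96-bit nonce from the request */
	iv[12..14] = 0         /* upper bytes of the 32-bit block counter */
	iv[15]     = 1         /* counter starts at 1, big endian */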
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 35a9de7fd475..b95d19974aa6 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -222,9 +222,10 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
/* Check if the cmd can/should be queued */
if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
- ret = -EBUSY;
- if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
+ if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
+ ret = -ENOSPC;
goto e_lock;
+ }
}
/* Look for an entry with the same tfm. If there is a cmd
@@ -243,9 +244,6 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
ret = ccp_enqueue_cmd(crypto_cmd->cmd);
if (!ccp_crypto_success(ret))
goto e_lock; /* Error, don't queue it */
- if ((ret == -EBUSY) &&
- !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
- goto e_lock; /* Not backlogging, don't queue it */
}
if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 65604fc65e8f..44a4d2779b15 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -788,13 +788,12 @@ static int ccp5_init(struct ccp_device *ccp)
struct ccp_cmd_queue *cmd_q;
struct dma_pool *dma_pool;
char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
- unsigned int qmr, qim, i;
+ unsigned int qmr, i;
u64 status;
u32 status_lo, status_hi;
int ret;
/* Find available queues */
- qim = 0;
qmr = ioread32(ccp->io_regs + Q_MASK_REG);
for (i = 0; i < MAX_HW_QUEUES; i++) {
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 4e029b176641..1b5035d56288 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -292,9 +292,12 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd)
i = ccp->cmd_q_count;
if (ccp->cmd_count >= MAX_CMD_QLEN) {
- ret = -EBUSY;
- if (cmd->flags & CCP_CMD_MAY_BACKLOG)
+ if (cmd->flags & CCP_CMD_MAY_BACKLOG) {
+ ret = -EBUSY;
list_add_tail(&cmd->entry, &ccp->backlog);
+ } else {
+ ret = -ENOSPC;
+ }
} else {
ret = -EINPROGRESS;
ccp->cmd_count++;
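Taken together, these hunks settle ccp_enqueue_cmd() on the following return-code convention for a full queue (summary inferred from the code above):

	-EINPROGRESS : command accepted and queued for processing
	-EBUSY       : queue full, command placed on the backlog
	               (only when CCP_CMD_MAY_BACKLOG is set)
	-ENOSPC      : queue full and backlogging was not requested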
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index d608043c0280..8b9da58459df 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -223,6 +223,7 @@ static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
desc->tx_desc.cookie, desc->status);
dma_cookie_complete(tx_desc);
+ dma_descriptor_unmap(tx_desc);
}
desc = __ccp_next_dma_desc(chan, desc);
@@ -230,9 +231,7 @@ static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
spin_unlock_irqrestore(&chan->lock, flags);
if (tx_desc) {
- if (tx_desc->callback &&
- (tx_desc->flags & DMA_PREP_INTERRUPT))
- tx_desc->callback(tx_desc->callback_param);
+ dmaengine_desc_get_callback_invoke(tx_desc, NULL);
dma_run_dependencies(tx_desc);
}
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
new file mode 100644
index 000000000000..b3afb6cc9d72
--- /dev/null
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -0,0 +1,805 @@
+/*
+ * AMD Platform Security Processor (PSP) interface
+ *
+ * Copyright (C) 2016-2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/ccp.h>
+
+#include "sp-dev.h"
+#include "psp-dev.h"
+
+#define DEVICE_NAME "sev"
+
+static DEFINE_MUTEX(sev_cmd_mutex);
+static struct sev_misc_dev *misc_dev;
+static struct psp_device *psp_master;
+
+static struct psp_device *psp_alloc_struct(struct sp_device *sp)
+{
+ struct device *dev = sp->dev;
+ struct psp_device *psp;
+
+ psp = devm_kzalloc(dev, sizeof(*psp), GFP_KERNEL);
+ if (!psp)
+ return NULL;
+
+ psp->dev = dev;
+ psp->sp = sp;
+
+ snprintf(psp->name, sizeof(psp->name), "psp-%u", sp->ord);
+
+ return psp;
+}
+
+static irqreturn_t psp_irq_handler(int irq, void *data)
+{
+ struct psp_device *psp = data;
+ unsigned int status;
+ int reg;
+
+ /* Read the interrupt status: */
+ status = ioread32(psp->io_regs + PSP_P2CMSG_INTSTS);
+
+ /* Check if it is command completion: */
+ if (!(status & BIT(PSP_CMD_COMPLETE_REG)))
+ goto done;
+
+ /* Check if it is SEV command completion: */
+ reg = ioread32(psp->io_regs + PSP_CMDRESP);
+ if (reg & PSP_CMDRESP_RESP) {
+ psp->sev_int_rcvd = 1;
+ wake_up(&psp->sev_int_queue);
+ }
+
+done:
+ /* Clear the interrupt status by writing the same value we read. */
+ iowrite32(status, psp->io_regs + PSP_P2CMSG_INTSTS);
+
+ return IRQ_HANDLED;
+}
+
+static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg)
+{
+ psp->sev_int_rcvd = 0;
+
+ wait_event(psp->sev_int_queue, psp->sev_int_rcvd);
+ *reg = ioread32(psp->io_regs + PSP_CMDRESP);
+}
+
+static int sev_cmd_buffer_len(int cmd)
+{
+ switch (cmd) {
+ case SEV_CMD_INIT: return sizeof(struct sev_data_init);
+ case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status);
+ case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr);
+ case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import);
+ case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export);
+ case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start);
+ case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data);
+ case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa);
+ case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish);
+ case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure);
+ case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate);
+ case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate);
+ case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission);
+ case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status);
+ case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg);
+ case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg);
+ case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start);
+ case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data);
+ case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa);
+ case SEV_CMD_SEND_FINISH: return sizeof(struct sev_data_send_finish);
+ case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start);
+ case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish);
+ case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data);
+ case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa);
+ case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret);
+ default: return 0;
+ }
+
+ return 0;
+}
+
+static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
+{
+ struct psp_device *psp = psp_master;
+ unsigned int phys_lsb, phys_msb;
+ unsigned int reg, ret = 0;
+
+ if (!psp)
+ return -ENODEV;
+
+ /* Get the physical address of the command buffer */
+ phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
+ phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
+
+ dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x\n",
+ cmd, phys_msb, phys_lsb);
+
+ print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+ sev_cmd_buffer_len(cmd), false);
+
+ iowrite32(phys_lsb, psp->io_regs + PSP_CMDBUFF_ADDR_LO);
+ iowrite32(phys_msb, psp->io_regs + PSP_CMDBUFF_ADDR_HI);
+
+ reg = cmd;
+ reg <<= PSP_CMDRESP_CMD_SHIFT;
+ reg |= PSP_CMDRESP_IOC;
+ iowrite32(reg, psp->io_regs + PSP_CMDRESP);
+
+ /* wait for command completion */
+ sev_wait_cmd_ioc(psp, &reg);
+
+ if (psp_ret)
+ *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
+
+ if (reg & PSP_CMDRESP_ERR_MASK) {
+ dev_dbg(psp->dev, "sev command %#x failed (%#010x)\n",
+ cmd, reg & PSP_CMDRESP_ERR_MASK);
+ ret = -EIO;
+ }
+
+ print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+ sev_cmd_buffer_len(cmd), false);
+
+ return ret;
+}
+
+static int sev_do_cmd(int cmd, void *data, int *psp_ret)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_do_cmd_locked(cmd, data, psp_ret);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+
+static int __sev_platform_init_locked(int *error)
+{
+ struct psp_device *psp = psp_master;
+ int rc = 0;
+
+ if (!psp)
+ return -ENODEV;
+
+ if (psp->sev_state == SEV_STATE_INIT)
+ return 0;
+
+ rc = __sev_do_cmd_locked(SEV_CMD_INIT, &psp->init_cmd_buf, error);
+ if (rc)
+ return rc;
+
+ psp->sev_state = SEV_STATE_INIT;
+ dev_dbg(psp->dev, "SEV firmware initialized\n");
+
+ return rc;
+}
+
+int sev_platform_init(int *error)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_platform_init_locked(error);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(sev_platform_init);
+
+static int __sev_platform_shutdown_locked(int *error)
+{
+ int ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
+ if (ret)
+ return ret;
+
+ psp_master->sev_state = SEV_STATE_UNINIT;
+ dev_dbg(psp_master->dev, "SEV firmware shutdown\n");
+
+ return ret;
+}
+
+static int sev_platform_shutdown(int *error)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_platform_shutdown_locked(error);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+
+static int sev_get_platform_state(int *state, int *error)
+{
+ int rc;
+
+ rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
+ &psp_master->status_cmd_buf, error);
+ if (rc)
+ return rc;
+
+ *state = psp_master->status_cmd_buf.state;
+ return rc;
+}
+
+static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
+{
+ int state, rc;
+
+ /*
+ * The SEV spec requires that FACTORY_RESET be issued in the
+ * UNINIT state. Before we go further, let's check whether any
+ * guest is active.
+ *
+ * If the FW is in the WORKING state then deny the request;
+ * otherwise issue a SHUTDOWN command to transition from INIT to
+ * UNINIT before issuing the FACTORY_RESET.
+ */
+ rc = sev_get_platform_state(&state, &argp->error);
+ if (rc)
+ return rc;
+
+ if (state == SEV_STATE_WORKING)
+ return -EBUSY;
+
+ if (state == SEV_STATE_INIT) {
+ rc = __sev_platform_shutdown_locked(&argp->error);
+ if (rc)
+ return rc;
+ }
+
+ return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
+}
+
+static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
+{
+ struct sev_user_data_status *data = &psp_master->status_cmd_buf;
+ int ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)argp->data, data, sizeof(*data)))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
+{
+ int rc;
+
+ if (psp_master->sev_state == SEV_STATE_UNINIT) {
+ rc = __sev_platform_init_locked(&argp->error);
+ if (rc)
+ return rc;
+ }
+
+ return __sev_do_cmd_locked(cmd, NULL, &argp->error);
+}
+
+static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
+{
+ struct sev_user_data_pek_csr input;
+ struct sev_data_pek_csr *data;
+ void *blob = NULL;
+ int ret;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* userspace wants to query CSR length */
+ if (!input.address || !input.length)
+ goto cmd;
+
+ /* allocate a physically contiguous buffer to store the CSR blob */
+ if (!access_ok(VERIFY_WRITE, input.address, input.length) ||
+ input.length > SEV_FW_BLOB_MAX_SIZE) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ blob = kmalloc(input.length, GFP_KERNEL);
+ if (!blob) {
+ ret = -ENOMEM;
+ goto e_free;
+ }
+
+ data->address = __psp_pa(blob);
+ data->len = input.length;
+
+cmd:
+ if (psp_master->sev_state == SEV_STATE_UNINIT) {
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ goto e_free_blob;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error);
+
+ /* If we only queried the CSR length, the FW filled in the expected length. */
+ input.length = data->len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free_blob;
+ }
+
+ if (blob) {
+ if (copy_to_user((void __user *)input.address, blob, input.length))
+ ret = -EFAULT;
+ }
+
+e_free_blob:
+ kfree(blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+void *psp_copy_user_blob(u64 __user uaddr, u32 len)
+{
+ void *data;
+
+ if (!uaddr || !len)
+ return ERR_PTR(-EINVAL);
+
+ /* verify that blob length does not exceed our limit */
+ if (len > SEV_FW_BLOB_MAX_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ data = kmalloc(len, GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ if (copy_from_user(data, (void __user *)(uintptr_t)uaddr, len))
+ goto e_free;
+
+ return data;
+
+e_free:
+ kfree(data);
+ return ERR_PTR(-EFAULT);
+}
+EXPORT_SYMBOL_GPL(psp_copy_user_blob);
+
+static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
+{
+ struct sev_user_data_pek_cert_import input;
+ struct sev_data_pek_cert_import *data;
+ void *pek_blob, *oca_blob;
+ int ret;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* copy PEK certificate blob from userspace */
+ pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
+ if (IS_ERR(pek_blob)) {
+ ret = PTR_ERR(pek_blob);
+ goto e_free;
+ }
+
+ data->pek_cert_address = __psp_pa(pek_blob);
+ data->pek_cert_len = input.pek_cert_len;
+
+ /* copy OCA certificate blob from userspace */
+ oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
+ if (IS_ERR(oca_blob)) {
+ ret = PTR_ERR(oca_blob);
+ goto e_free_pek;
+ }
+
+ data->oca_cert_address = __psp_pa(oca_blob);
+ data->oca_cert_len = input.oca_cert_len;
+
+ /* If platform is not in INIT state then transition it to INIT */
+ if (psp_master->sev_state != SEV_STATE_INIT) {
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ goto e_free_oca;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error);
+
+e_free_oca:
+ kfree(oca_blob);
+e_free_pek:
+ kfree(pek_blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
+{
+ struct sev_user_data_pdh_cert_export input;
+ void *pdh_blob = NULL, *cert_blob = NULL;
+ struct sev_data_pdh_cert_export *data;
+ int ret;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* Userspace wants to query the certificate length. */
+ if (!input.pdh_cert_address ||
+ !input.pdh_cert_len ||
+ !input.cert_chain_address)
+ goto cmd;
+
+ /* Allocate a physically contiguous buffer to store the PDH blob. */
+ if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) ||
+ !access_ok(VERIFY_WRITE, input.pdh_cert_address, input.pdh_cert_len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ /* Allocate a physically contiguous buffer to store the cert chain blob. */
+ if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) ||
+ !access_ok(VERIFY_WRITE, input.cert_chain_address, input.cert_chain_len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
+ if (!pdh_blob) {
+ ret = -ENOMEM;
+ goto e_free;
+ }
+
+ data->pdh_cert_address = __psp_pa(pdh_blob);
+ data->pdh_cert_len = input.pdh_cert_len;
+
+ cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
+ if (!cert_blob) {
+ ret = -ENOMEM;
+ goto e_free_pdh;
+ }
+
+ data->cert_chain_address = __psp_pa(cert_blob);
+ data->cert_chain_len = input.cert_chain_len;
+
+cmd:
+ /* If platform is not in INIT state then transition it to INIT. */
+ if (psp_master->sev_state != SEV_STATE_INIT) {
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ goto e_free_cert;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);
+
+ /* If we only queried the lengths, the FW filled in the expected values. */
+ input.cert_chain_len = data->cert_chain_len;
+ input.pdh_cert_len = data->pdh_cert_len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free_cert;
+ }
+
+ if (pdh_blob) {
+ if (copy_to_user((void __user *)input.pdh_cert_address,
+ pdh_blob, input.pdh_cert_len)) {
+ ret = -EFAULT;
+ goto e_free_cert;
+ }
+ }
+
+ if (cert_blob) {
+ if (copy_to_user((void __user *)input.cert_chain_address,
+ cert_blob, input.cert_chain_len))
+ ret = -EFAULT;
+ }
+
+e_free_cert:
+ kfree(cert_blob);
+e_free_pdh:
+ kfree(pdh_blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct sev_issue_cmd input;
+ int ret = -EFAULT;
+
+ if (!psp_master)
+ return -ENODEV;
+
+ if (ioctl != SEV_ISSUE_CMD)
+ return -EINVAL;
+
+ if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
+ return -EFAULT;
+
+ if (input.cmd > SEV_MAX)
+ return -EINVAL;
+
+ mutex_lock(&sev_cmd_mutex);
+
+ switch (input.cmd) {
+
+ case SEV_FACTORY_RESET:
+ ret = sev_ioctl_do_reset(&input);
+ break;
+ case SEV_PLATFORM_STATUS:
+ ret = sev_ioctl_do_platform_status(&input);
+ break;
+ case SEV_PEK_GEN:
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input);
+ break;
+ case SEV_PDH_GEN:
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input);
+ break;
+ case SEV_PEK_CSR:
+ ret = sev_ioctl_do_pek_csr(&input);
+ break;
+ case SEV_PEK_CERT_IMPORT:
+ ret = sev_ioctl_do_pek_import(&input);
+ break;
+ case SEV_PDH_CERT_EXPORT:
+ ret = sev_ioctl_do_pdh_export(&input);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
+ ret = -EFAULT;
+out:
+ mutex_unlock(&sev_cmd_mutex);
+
+ return ret;
+}
+
+static const struct file_operations sev_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sev_ioctl,
+};
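As a rough sketch of how userspace might exercise this ioctl interface (assuming SEV_ISSUE_CMD, struct sev_issue_cmd and struct sev_user_data_status from the uapi psp-sev.h header introduced by this series):

	struct sev_user_data_status status = {};
	struct sev_issue_cmd cmd = {
		.cmd  = SEV_PLATFORM_STATUS,
		.data = (__u64)(unsigned long)&status,
	};
	int fd = open("/dev/sev", O_RDWR);

	if (fd >= 0 && !ioctl(fd, SEV_ISSUE_CMD, &cmd))
		printf("SEV API %u.%u build %u\n",
		       status.api_major, status.api_minor, status.build);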
+
+int sev_platform_status(struct sev_user_data_status *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_platform_status);
+
+int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_deactivate);
+
+int sev_guest_activate(struct sev_data_activate *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_activate);
+
+int sev_guest_decommission(struct sev_data_decommission *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_decommission);
+
+int sev_guest_df_flush(int *error)
+{
+ return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_df_flush);
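For in-kernel users such as a hypervisor, the exported helpers above are the intended entry points; a minimal sketch, assuming the structure layouts from linux/psp-sev.h:

	struct sev_data_activate activate = { .handle = handle, .asid = asid };
	int error, rc;

	rc = sev_guest_activate(&activate, &error);
	if (rc)
		pr_err("SEV ACTIVATE failed, fw error %#x\n", error);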
+
+static void sev_exit(struct kref *ref)
+{
+ struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount);
+
+ misc_deregister(&misc_dev->misc);
+}
+
+static int sev_misc_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ int ret;
+
+ /*
+ * SEV feature support can be detected on multiple devices, but the
+ * SEV FW commands must be issued on the master. During probe we do
+ * not yet know which device that is, so we create /dev/sev on the
+ * first device probe. sev_do_cmd() then finds the right master
+ * device and issues the command to its firmware.
+ */
+ if (!misc_dev) {
+ struct miscdevice *misc;
+
+ misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL);
+ if (!misc_dev)
+ return -ENOMEM;
+
+ misc = &misc_dev->misc;
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->name = DEVICE_NAME;
+ misc->fops = &sev_fops;
+
+ ret = misc_register(misc);
+ if (ret)
+ return ret;
+
+ kref_init(&misc_dev->refcount);
+ } else {
+ kref_get(&misc_dev->refcount);
+ }
+
+ init_waitqueue_head(&psp->sev_int_queue);
+ psp->sev_misc = misc_dev;
+ dev_dbg(dev, "registered SEV device\n");
+
+ return 0;
+}
+
+static int sev_init(struct psp_device *psp)
+{
+ /* Check if device supports SEV feature */
+ if (!(ioread32(psp->io_regs + PSP_FEATURE_REG) & 1)) {
+ dev_dbg(psp->dev, "device does not support SEV\n");
+ return 1;
+ }
+
+ return sev_misc_init(psp);
+}
+
+int psp_dev_init(struct sp_device *sp)
+{
+ struct device *dev = sp->dev;
+ struct psp_device *psp;
+ int ret;
+
+ ret = -ENOMEM;
+ psp = psp_alloc_struct(sp);
+ if (!psp)
+ goto e_err;
+
+ sp->psp_data = psp;
+
+ psp->vdata = (struct psp_vdata *)sp->dev_vdata->psp_vdata;
+ if (!psp->vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
+
+ psp->io_regs = sp->io_map + psp->vdata->offset;
+
+ /* Disable and clear interrupts until ready */
+ iowrite32(0, psp->io_regs + PSP_P2CMSG_INTEN);
+ iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTSTS);
+
+ /* Request an irq */
+ ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp);
+ if (ret) {
+ dev_err(dev, "psp: unable to allocate an IRQ\n");
+ goto e_err;
+ }
+
+ ret = sev_init(psp);
+ if (ret)
+ goto e_irq;
+
+ if (sp->set_psp_master_device)
+ sp->set_psp_master_device(sp);
+
+ /* Enable interrupt */
+ iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTEN);
+
+ return 0;
+
+e_irq:
+ sp_free_psp_irq(psp->sp, psp);
+e_err:
+ sp->psp_data = NULL;
+
+ dev_notice(dev, "psp initialization failed\n");
+
+ return ret;
+}
+
+void psp_dev_destroy(struct sp_device *sp)
+{
+ struct psp_device *psp = sp->psp_data;
+
+ if (psp->sev_misc)
+ kref_put(&misc_dev->refcount, sev_exit);
+
+ sp_free_psp_irq(sp, psp);
+}
+
+int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
+ void *data, int *error)
+{
+ if (!filep || filep->f_op != &sev_fops)
+ return -EBADF;
+
+ return sev_do_cmd(cmd, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
+
+void psp_pci_init(void)
+{
+ struct sev_user_data_status *status;
+ struct sp_device *sp;
+ int error, rc;
+
+ sp = sp_get_psp_master_device();
+ if (!sp)
+ return;
+
+ psp_master = sp->psp_data;
+
+ /* Initialize the platform */
+ rc = sev_platform_init(&error);
+ if (rc) {
+ dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
+ goto err;
+ }
+
+ /* Display SEV firmware version */
+ status = &psp_master->status_cmd_buf;
+ rc = sev_platform_status(status, &error);
+ if (rc) {
+ dev_err(sp->dev, "SEV: failed to get status error %#x\n", error);
+ goto err;
+ }
+
+ dev_info(sp->dev, "SEV API:%d.%d build:%d\n", status->api_major,
+ status->api_minor, status->build);
+ return;
+
+err:
+ psp_master = NULL;
+}
+
+void psp_pci_exit(void)
+{
+ if (!psp_master)
+ return;
+
+ sev_platform_shutdown(NULL);
+}
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
new file mode 100644
index 000000000000..c81f0b11287a
--- /dev/null
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -0,0 +1,83 @@
+/*
+ * AMD Platform Security Processor (PSP) interface driver
+ *
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __PSP_DEV_H__
+#define __PSP_DEV_H__
+
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/dmapool.h>
+#include <linux/hw_random.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/dmaengine.h>
+#include <linux/psp-sev.h>
+#include <linux/miscdevice.h>
+
+#include "sp-dev.h"
+
+#define PSP_C2PMSG(_num) ((_num) << 2)
+#define PSP_CMDRESP PSP_C2PMSG(32)
+#define PSP_CMDBUFF_ADDR_LO PSP_C2PMSG(56)
+#define PSP_CMDBUFF_ADDR_HI PSP_C2PMSG(57)
+#define PSP_FEATURE_REG PSP_C2PMSG(63)
+
+#define PSP_P2CMSG(_num) ((_num) << 2)
+#define PSP_CMD_COMPLETE_REG 1
+#define PSP_CMD_COMPLETE PSP_P2CMSG(PSP_CMD_COMPLETE_REG)
+
+#define PSP_P2CMSG_INTEN 0x0110
+#define PSP_P2CMSG_INTSTS 0x0114
+
+#define PSP_C2PMSG_ATTR_0 0x0118
+#define PSP_C2PMSG_ATTR_1 0x011c
+#define PSP_C2PMSG_ATTR_2 0x0120
+#define PSP_C2PMSG_ATTR_3 0x0124
+#define PSP_P2CMSG_ATTR_0 0x0128
+
+#define PSP_CMDRESP_CMD_SHIFT 16
+#define PSP_CMDRESP_IOC BIT(0)
+#define PSP_CMDRESP_RESP BIT(31)
+#define PSP_CMDRESP_ERR_MASK 0xffff
+
+#define MAX_PSP_NAME_LEN 16
+
+struct sev_misc_dev {
+ struct kref refcount;
+ struct miscdevice misc;
+};
+
+struct psp_device {
+ struct list_head entry;
+
+ struct psp_vdata *vdata;
+ char name[MAX_PSP_NAME_LEN];
+
+ struct device *dev;
+ struct sp_device *sp;
+
+ void __iomem *io_regs;
+
+ int sev_state;
+ unsigned int sev_int_rcvd;
+ wait_queue_head_t sev_int_queue;
+ struct sev_misc_dev *sev_misc;
+ struct sev_user_data_status status_cmd_buf;
+ struct sev_data_init init_cmd_buf;
+};
+
+#endif /* __PSP_DEV_H__ */
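For orientation, the mailbox word used by __sev_do_cmd_locked() decomposes as follows under the masks above (layout inferred from these definitions):

	bit  31      PSP_CMDRESP_RESP      set by the PSP when the command completes
	bits 30..16  command identifier    written as cmd << PSP_CMDRESP_CMD_SHIFT
	bits 15..0   PSP_CMDRESP_ERR_MASK  SEV firmware status code, 0 on success
	bit   0      PSP_CMDRESP_IOC       interrupt-on-completion flag (write side)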
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index bef387c8abfd..eb0da6572720 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -198,6 +198,8 @@ int sp_init(struct sp_device *sp)
if (sp->dev_vdata->ccp_vdata)
ccp_dev_init(sp);
+ if (sp->dev_vdata->psp_vdata)
+ psp_dev_init(sp);
return 0;
}
@@ -206,6 +208,9 @@ void sp_destroy(struct sp_device *sp)
if (sp->dev_vdata->ccp_vdata)
ccp_dev_destroy(sp);
+ if (sp->dev_vdata->psp_vdata)
+ psp_dev_destroy(sp);
+
sp_del_device(sp);
}
@@ -237,6 +242,27 @@ int sp_resume(struct sp_device *sp)
}
#endif
+struct sp_device *sp_get_psp_master_device(void)
+{
+ struct sp_device *i, *ret = NULL;
+ unsigned long flags;
+
+ write_lock_irqsave(&sp_unit_lock, flags);
+ if (list_empty(&sp_units))
+ goto unlock;
+
+ list_for_each_entry(i, &sp_units, entry) {
+ if (i->psp_data)
+ break;
+ }
+
+ if (i->get_psp_master_device)
+ ret = i->get_psp_master_device();
+unlock:
+ write_unlock_irqrestore(&sp_unit_lock, flags);
+ return ret;
+}
+
static int __init sp_mod_init(void)
{
#ifdef CONFIG_X86
@@ -246,6 +272,10 @@ static int __init sp_mod_init(void)
if (ret)
return ret;
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ psp_pci_init();
+#endif
+
return 0;
#endif
@@ -265,6 +295,11 @@ static int __init sp_mod_init(void)
static void __exit sp_mod_exit(void)
{
#ifdef CONFIG_X86
+
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ psp_pci_exit();
+#endif
+
sp_pci_exit();
#endif
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 5ab486ade1ad..acb197b66ced 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -42,12 +42,17 @@ struct ccp_vdata {
const unsigned int offset;
const unsigned int rsamax;
};
+
+struct psp_vdata {
+ const unsigned int offset;
+};
+
/* Structure to hold SP device data */
struct sp_dev_vdata {
const unsigned int bar;
const struct ccp_vdata *ccp_vdata;
- void *psp_vdata;
+ const struct psp_vdata *psp_vdata;
};
struct sp_device {
@@ -68,6 +73,10 @@ struct sp_device {
/* DMA caching attribute support */
unsigned int axcache;
+ /* get and set master device */
+ struct sp_device*(*get_psp_master_device)(void);
+ void (*set_psp_master_device)(struct sp_device *);
+
bool irq_registered;
bool use_tasklet;
@@ -103,6 +112,7 @@ void sp_free_ccp_irq(struct sp_device *sp, void *data);
int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler,
const char *name, void *data);
void sp_free_psp_irq(struct sp_device *sp, void *data);
+struct sp_device *sp_get_psp_master_device(void);
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
@@ -130,4 +140,20 @@ static inline int ccp_dev_resume(struct sp_device *sp)
}
#endif /* CONFIG_CRYPTO_DEV_SP_CCP */
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+
+int psp_dev_init(struct sp_device *sp);
+void psp_pci_init(void);
+void psp_dev_destroy(struct sp_device *sp);
+void psp_pci_exit(void);
+
+#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
+
+static inline int psp_dev_init(struct sp_device *sp) { return 0; }
+static inline void psp_pci_init(void) { }
+static inline void psp_dev_destroy(struct sp_device *sp) { }
+static inline void psp_pci_exit(void) { }
+
+#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
+
#endif
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 9859aa683a28..f5f43c50698a 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -25,6 +25,7 @@
#include <linux/ccp.h>
#include "ccp-dev.h"
+#include "psp-dev.h"
#define MSIX_VECTORS 2
@@ -32,6 +33,7 @@ struct sp_pci {
int msix_count;
struct msix_entry msix_entry[MSIX_VECTORS];
};
+static struct sp_device *sp_dev_master;
static int sp_get_msix_irqs(struct sp_device *sp)
{
@@ -108,6 +110,45 @@ static void sp_free_irqs(struct sp_device *sp)
sp->psp_irq = 0;
}
+static bool sp_pci_is_master(struct sp_device *sp)
+{
+ struct device *dev_cur, *dev_new;
+ struct pci_dev *pdev_cur, *pdev_new;
+
+ dev_new = sp->dev;
+ dev_cur = sp_dev_master->dev;
+
+ pdev_new = to_pci_dev(dev_new);
+ pdev_cur = to_pci_dev(dev_cur);
+
+ if (pdev_new->bus->number < pdev_cur->bus->number)
+ return true;
+
+ if (PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn))
+ return true;
+
+ if (PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn))
+ return true;
+
+ return false;
+}
+
+static void psp_set_master(struct sp_device *sp)
+{
+ if (!sp_dev_master) {
+ sp_dev_master = sp;
+ return;
+ }
+
+ if (sp_pci_is_master(sp))
+ sp_dev_master = sp;
+}
+
+static struct sp_device *psp_get_master(void)
+{
+ return sp_dev_master;
+}
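As an illustration of the ordering this implements (hypothetical devices, not from the patch):

	/* existing master: PCI 01:00.2
	 * new device:      PCI 00:08.0  ->  bus 0x00 < 0x01, so sp_pci_is_master()
	 * returns true and sp_dev_master is switched to the new device
	 */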
+
static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct sp_device *sp;
@@ -166,6 +207,8 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto e_err;
pci_set_master(pdev);
+ sp->set_psp_master_device = psp_set_master;
+ sp->get_psp_master_device = psp_get_master;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (ret) {
@@ -225,6 +268,12 @@ static int sp_pci_resume(struct pci_dev *pdev)
}
#endif
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+static const struct psp_vdata psp_entry = {
+ .offset = 0x10500,
+};
+#endif
+
static const struct sp_dev_vdata dev_vdata[] = {
{
.bar = 2,
@@ -237,6 +286,9 @@ static const struct sp_dev_vdata dev_vdata[] = {
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
.ccp_vdata = &ccpv5a,
#endif
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &psp_entry
+#endif
},
{
.bar = 2,
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 3e104f5aa0c2..5ae9f8706f17 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -5,6 +5,7 @@ config CRYPTO_DEV_CHELSIO
select CRYPTO_SHA256
select CRYPTO_SHA512
select CRYPTO_AUTHENC
+ select CRYPTO_GF128MUL
---help---
The Chelsio Crypto Co-processor driver for T6 adapters.
@@ -18,3 +19,13 @@ config CRYPTO_DEV_CHELSIO
To compile this driver as a module, choose M here: the module
will be called chcr.
+
+config CHELSIO_IPSEC_INLINE
+ bool "Chelsio IPSec XFRM Tx crypto offload"
+ depends on CHELSIO_T4
+ depends on CRYPTO_DEV_CHELSIO
+ depends on XFRM_OFFLOAD
+ depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+ default n
+ ---help---
+ Enable support for inline IPsec Tx crypto offload.
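A configuration fragment satisfying these dependencies might look like the following (illustrative; only the symbol names come from the entry above):

	CONFIG_CHELSIO_T4=m
	CONFIG_XFRM_OFFLOAD=y
	CONFIG_INET_ESP_OFFLOAD=m
	CONFIG_CRYPTO_DEV_CHELSIO=m
	CONFIG_CHELSIO_IPSEC_INLINE=y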
diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile
index bebdf06687ad..eaecaf1ebcf3 100644
--- a/drivers/crypto/chelsio/Makefile
+++ b/drivers/crypto/chelsio/Makefile
@@ -2,3 +2,4 @@ ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o
chcr-objs := chcr_core.o chcr_algo.o
+chcr-$(CONFIG_CHELSIO_IPSEC_INLINE) += chcr_ipsec.o
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 0e8160701833..34a02d690548 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -53,6 +53,7 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
+#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
@@ -70,6 +71,31 @@
#include "chcr_algo.h"
#include "chcr_crypto.h"
+#define IV AES_BLOCK_SIZE
+
+static unsigned int sgl_ent_len[] = {
+ 0, 0, 16, 24, 40, 48, 64, 72, 88,
+ 96, 112, 120, 136, 144, 160, 168, 184,
+ 192, 208, 216, 232, 240, 256, 264, 280,
+ 288, 304, 312, 328, 336, 352, 360, 376
+};
+
+static unsigned int dsgl_ent_len[] = {
+ 0, 32, 32, 48, 48, 64, 64, 80, 80,
+ 112, 112, 128, 128, 144, 144, 160, 160,
+ 192, 192, 208, 208, 224, 224, 240, 240,
+ 272, 272, 288, 288, 304, 304, 320, 320
+};
+
+static u32 round_constant[11] = {
+ 0x01000000, 0x02000000, 0x04000000, 0x08000000,
+ 0x10000000, 0x20000000, 0x40000000, 0x80000000,
+ 0x1B000000, 0x36000000, 0x6C000000
+};
+
+static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+ unsigned char *input, int err);
+
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
return ctx->crypto_ctx->aeadctx;
@@ -102,22 +128,82 @@ static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
static inline int is_ofld_imm(const struct sk_buff *skb)
{
- return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
+ return (skb->len <= SGE_MAX_WR_LEN);
}
-/*
- * sgl_len - calculates the size of an SGL of the given capacity
- * @n: the number of SGL entries
- * Calculates the number of flits needed for a scatter/gather list that
- * can hold the given number of entries.
- */
-static inline unsigned int sgl_len(unsigned int n)
+static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
+ unsigned int entlen,
+ unsigned int skip)
+{
+ int nents = 0;
+ unsigned int less;
+ unsigned int skip_len = 0;
+
+ while (sg && skip) {
+ if (sg_dma_len(sg) <= skip) {
+ skip -= sg_dma_len(sg);
+ skip_len = 0;
+ sg = sg_next(sg);
+ } else {
+ skip_len = skip;
+ skip = 0;
+ }
+ }
+
+ while (sg && reqlen) {
+ less = min(reqlen, sg_dma_len(sg) - skip_len);
+ nents += DIV_ROUND_UP(less, entlen);
+ reqlen -= less;
+ skip_len = 0;
+ sg = sg_next(sg);
+ }
+ return nents;
+}
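A worked example of sg_nents_xlen() (the 2048-byte entry size is purely illustrative; the real limits are CHCR_SRC_SG_SIZE/CHCR_DST_SG_SIZE):

	/* one 8 KiB DMA segment, reqlen = 5000, entlen = 2048, skip = 0:
	 *   less  = min(5000, 8192) = 5000
	 *   nents = DIV_ROUND_UP(5000, 2048) = 3
	 */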
+
+static inline void chcr_handle_ahash_resp(struct ahash_request *req,
+ unsigned char *input,
+ int err)
+{
+ struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+ int digestsize, updated_digestsize;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
+
+ if (input == NULL)
+ goto out;
+ digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
+ if (reqctx->is_sg_map)
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+ if (reqctx->dma_addr)
+ dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->dma_addr,
+ reqctx->dma_len, DMA_TO_DEVICE);
+ reqctx->dma_addr = 0;
+ updated_digestsize = digestsize;
+ if (digestsize == SHA224_DIGEST_SIZE)
+ updated_digestsize = SHA256_DIGEST_SIZE;
+ else if (digestsize == SHA384_DIGEST_SIZE)
+ updated_digestsize = SHA512_DIGEST_SIZE;
+ if (reqctx->result == 1) {
+ reqctx->result = 0;
+ memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
+ digestsize);
+ } else {
+ memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
+ updated_digestsize);
+ }
+out:
+ req->base.complete(&req->base, err);
+}
+
+static inline int get_aead_subtype(struct crypto_aead *aead)
{
- n--;
- return (3 * n) / 2 + (n & 1) + 2;
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct chcr_alg_template *chcr_crypto_alg =
+ container_of(alg, struct chcr_alg_template, alg.aead);
+ return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
-static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
+void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
u8 temp[SHA512_DIGEST_SIZE];
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -142,6 +228,25 @@ static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
*err = 0;
}
+static inline void chcr_handle_aead_resp(struct aead_request *req,
+ unsigned char *input,
+ int err)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
+
+ chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
+ if (reqctx->b0_dma)
+ dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
+ reqctx->b0_len, DMA_BIDIRECTIONAL);
+ if (reqctx->verify == VERIFY_SW) {
+ chcr_verify_tag(req, input, &err);
+ reqctx->verify = VERIFY_HW;
+ }
+ req->base.complete(&req->base, err);
+}
+
/*
* chcr_handle_resp - Unmap the DMA buffers associated with the request
* @req: crypto request
@@ -151,29 +256,11 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
{
struct crypto_tfm *tfm = req->tfm;
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct chcr_req_ctx ctx_req;
- unsigned int digestsize, updated_digestsize;
struct adapter *adap = padap(ctx->dev);
switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
- ctx_req.req.aead_req = aead_request_cast(req);
- ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
- dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
- ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
- if (ctx_req.ctx.reqctx->skb) {
- kfree_skb(ctx_req.ctx.reqctx->skb);
- ctx_req.ctx.reqctx->skb = NULL;
- }
- free_new_sg(ctx_req.ctx.reqctx->newdstsg);
- ctx_req.ctx.reqctx->newdstsg = NULL;
- if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
- chcr_verify_tag(ctx_req.req.aead_req, input,
- &err);
- ctx_req.ctx.reqctx->verify = VERIFY_HW;
- }
- ctx_req.req.aead_req->base.complete(req, err);
+ chcr_handle_aead_resp(aead_request_cast(req), input, err);
break;
case CRYPTO_ALG_TYPE_ABLKCIPHER:
@@ -182,60 +269,13 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
break;
case CRYPTO_ALG_TYPE_AHASH:
- ctx_req.req.ahash_req = ahash_request_cast(req);
- ctx_req.ctx.ahash_ctx =
- ahash_request_ctx(ctx_req.req.ahash_req);
- digestsize =
- crypto_ahash_digestsize(crypto_ahash_reqtfm(
- ctx_req.req.ahash_req));
- updated_digestsize = digestsize;
- if (digestsize == SHA224_DIGEST_SIZE)
- updated_digestsize = SHA256_DIGEST_SIZE;
- else if (digestsize == SHA384_DIGEST_SIZE)
- updated_digestsize = SHA512_DIGEST_SIZE;
- if (ctx_req.ctx.ahash_ctx->skb) {
- kfree_skb(ctx_req.ctx.ahash_ctx->skb);
- ctx_req.ctx.ahash_ctx->skb = NULL;
+ chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
}
- if (ctx_req.ctx.ahash_ctx->result == 1) {
- ctx_req.ctx.ahash_ctx->result = 0;
- memcpy(ctx_req.req.ahash_req->result, input +
- sizeof(struct cpl_fw6_pld),
- digestsize);
- } else {
- memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
- sizeof(struct cpl_fw6_pld),
- updated_digestsize);
- }
- ctx_req.req.ahash_req->base.complete(req, err);
- break;
- }
atomic_inc(&adap->chcr_stats.complete);
return err;
}
-/*
- * calc_tx_flits_ofld - calculate # of flits for an offload packet
- * @skb: the packet
- * Returns the number of flits needed for the given offload packet.
- * These packets are already fully constructed and no additional headers
- * will be added.
- */
-static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
-{
- unsigned int flits, cnt;
-
- if (is_ofld_imm(skb))
- return DIV_ROUND_UP(skb->len, 8);
-
- flits = skb_transport_offset(skb) / 8; /* headers */
- cnt = skb_shinfo(skb)->nr_frags;
- if (skb_tail_pointer(skb) != skb_transport_header(skb))
- cnt++;
- return flits + sgl_len(cnt);
-}
-
-static inline void get_aes_decrypt_key(unsigned char *dec_key,
+static void get_aes_decrypt_key(unsigned char *dec_key,
const unsigned char *key,
unsigned int keylength)
{
@@ -382,13 +422,19 @@ static inline int is_hmac(struct crypto_tfm *tfm)
return 0;
}
-static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
- struct scatterlist *sg,
- struct phys_sge_parm *sg_param)
+static inline void dsgl_walk_init(struct dsgl_walk *walk,
+ struct cpl_rx_phys_dsgl *dsgl)
+{
+ walk->dsgl = dsgl;
+ walk->nents = 0;
+ walk->to = (struct phys_sge_pairs *)(dsgl + 1);
+}
+
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
{
- struct phys_sge_pairs *to;
- unsigned int len = 0, left_size = sg_param->obsize;
- unsigned int nents = sg_param->nents, i, j = 0;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+
+ phys_cpl = walk->dsgl;
phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
| CPL_RX_PHYS_DSGL_ISRDMA_V(0));
@@ -398,94 +444,181 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
CPL_RX_PHYS_DSGL_DCAID_V(0) |
- CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
+ CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
- phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
+ phys_cpl->rss_hdr_int.qid = htons(qid);
phys_cpl->rss_hdr_int.hash_val = 0;
- to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
- sizeof(struct cpl_rx_phys_dsgl));
- for (i = 0; nents && left_size; to++) {
- for (j = 0; j < 8 && nents && left_size; j++, nents--) {
- len = min(left_size, sg_dma_len(sg));
- to->len[j] = htons(len);
- to->addr[j] = cpu_to_be64(sg_dma_address(sg));
- left_size -= len;
- sg = sg_next(sg);
- }
- }
}
-static inline int map_writesg_phys_cpl(struct device *dev,
- struct cpl_rx_phys_dsgl *phys_cpl,
- struct scatterlist *sg,
- struct phys_sge_parm *sg_param)
+static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
+ size_t size,
+ dma_addr_t *addr)
{
- if (!sg || !sg_param->nents)
- return -EINVAL;
+ int j;
- sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
- if (sg_param->nents == 0) {
- pr_err("CHCR : DMA mapping failed\n");
- return -EINVAL;
- }
- write_phys_cpl(phys_cpl, sg, sg_param);
- return 0;
+ if (!size)
+ return;
+ j = walk->nents;
+ walk->to->len[j % 8] = htons(size);
+ walk->to->addr[j % 8] = cpu_to_be64(*addr);
+ j++;
+ if ((j % 8) == 0)
+ walk->to++;
+ walk->nents = j;
}
-static inline int get_aead_subtype(struct crypto_aead *aead)
+static void dsgl_walk_add_sg(struct dsgl_walk *walk,
+ struct scatterlist *sg,
+ unsigned int slen,
+ unsigned int skip)
{
- struct aead_alg *alg = crypto_aead_alg(aead);
- struct chcr_alg_template *chcr_crypto_alg =
- container_of(alg, struct chcr_alg_template, alg.aead);
- return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
+ int skip_len = 0;
+ unsigned int left_size = slen, len = 0;
+ unsigned int j = walk->nents;
+ int offset, ent_len;
+
+ if (!slen)
+ return;
+ while (sg && skip) {
+ if (sg_dma_len(sg) <= skip) {
+ skip -= sg_dma_len(sg);
+ skip_len = 0;
+ sg = sg_next(sg);
+ } else {
+ skip_len = skip;
+ skip = 0;
+ }
+ }
+
+ while (left_size && sg) {
+ len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
+ offset = 0;
+ while (len) {
+ ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
+ walk->to->len[j % 8] = htons(ent_len);
+ walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
+ offset + skip_len);
+ offset += ent_len;
+ len -= ent_len;
+ j++;
+ if ((j % 8) == 0)
+ walk->to++;
+ }
+ walk->last_sg = sg;
+ walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
+ skip_len) + skip_len;
+ left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
+ skip_len = 0;
+ sg = sg_next(sg);
+ }
+ walk->nents = j;
}
-static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
+static inline void ulptx_walk_init(struct ulptx_walk *walk,
+ struct ulptx_sgl *ulp)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct chcr_alg_template *chcr_crypto_alg =
- container_of(alg, struct chcr_alg_template, alg.crypto);
-
- return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
+ walk->sgl = ulp;
+ walk->nents = 0;
+ walk->pair_idx = 0;
+ walk->pair = ulp->sge;
+ walk->last_sg = NULL;
+ walk->last_sg_len = 0;
}
-static inline void write_buffer_to_skb(struct sk_buff *skb,
- unsigned int *frags,
- char *bfr,
- u8 bfr_len)
+static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
- skb->len += bfr_len;
- skb->data_len += bfr_len;
- skb->truesize += bfr_len;
- get_page(virt_to_page(bfr));
- skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
- offset_in_page(bfr), bfr_len);
- (*frags)++;
+ walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+ ULPTX_NSGE_V(walk->nents));
}
-static inline void
-write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
- struct scatterlist *sg, unsigned int count)
+static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
+ size_t size,
+ dma_addr_t *addr)
{
- struct page *spage;
- unsigned int page_len;
+ if (!size)
+ return;
- skb->len += count;
- skb->data_len += count;
- skb->truesize += count;
+ if (walk->nents == 0) {
+ walk->sgl->len0 = cpu_to_be32(size);
+ walk->sgl->addr0 = cpu_to_be64(*addr);
+ } else {
+ walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
+ walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
+ walk->pair_idx = !walk->pair_idx;
+ if (!walk->pair_idx)
+ walk->pair++;
+ }
+ walk->nents++;
+}
- while (count > 0) {
- if (!sg || (!(sg->length)))
- break;
- spage = sg_page(sg);
- get_page(spage);
- page_len = min(sg->length, count);
- skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
- (*frags)++;
- count -= page_len;
- sg = sg_next(sg);
+static void ulptx_walk_add_sg(struct ulptx_walk *walk,
+ struct scatterlist *sg,
+ unsigned int len,
+ unsigned int skip)
+{
+ int small;
+ int skip_len = 0;
+ unsigned int sgmin;
+
+ if (!len)
+ return;
+
+ while (sg && skip) {
+ if (sg_dma_len(sg) <= skip) {
+ skip -= sg_dma_len(sg);
+ skip_len = 0;
+ sg = sg_next(sg);
+ } else {
+ skip_len = skip;
+ skip = 0;
+ }
}
+ WARN(!sg, "SG should not be null here\n");
+ if (sg && (walk->nents == 0)) {
+ small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
+ sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
+ walk->sgl->len0 = cpu_to_be32(sgmin);
+ walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
+ walk->nents++;
+ len -= sgmin;
+ walk->last_sg = sg;
+ walk->last_sg_len = sgmin + skip_len;
+ skip_len += sgmin;
+ if (sg_dma_len(sg) == skip_len) {
+ sg = sg_next(sg);
+ skip_len = 0;
+ }
+ }
+
+ while (sg && len) {
+ small = min(sg_dma_len(sg) - skip_len, len);
+ sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
+ walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
+ walk->pair->addr[walk->pair_idx] =
+ cpu_to_be64(sg_dma_address(sg) + skip_len);
+ walk->pair_idx = !walk->pair_idx;
+ walk->nents++;
+ if (!walk->pair_idx)
+ walk->pair++;
+ len -= sgmin;
+ skip_len += sgmin;
+ walk->last_sg = sg;
+ walk->last_sg_len = skip_len;
+ if (sg_dma_len(sg) == skip_len) {
+ sg = sg_next(sg);
+ skip_len = 0;
+ }
+ }
+}
+
+static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct chcr_alg_template *chcr_crypto_alg =
+ container_of(alg, struct chcr_alg_template, alg.crypto);
+
+ return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
@@ -524,30 +657,46 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
struct scatterlist *dst,
unsigned int minsg,
unsigned int space,
- short int *sent,
- short int *dent)
+ unsigned int srcskip,
+ unsigned int dstskip)
{
int srclen = 0, dstlen = 0;
- int srcsg = minsg, dstsg = 0;
+ int srcsg = minsg, dstsg = minsg;
+ int offset = 0, less;
+
+ if (sg_dma_len(src) == srcskip) {
+ src = sg_next(src);
+ srcskip = 0;
+ }
+
+ if (sg_dma_len(dst) == dstskip) {
+ dst = sg_next(dst);
+ dstskip = 0;
+ }
- *sent = 0;
- *dent = 0;
- while (src && dst && ((srcsg + 1) <= MAX_SKB_FRAGS) &&
+ while (src && dst &&
space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
- srclen += src->length;
+ srclen += (sg_dma_len(src) - srcskip);
srcsg++;
+ offset = 0;
while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
if (srclen <= dstlen)
break;
- dstlen += dst->length;
- dst = sg_next(dst);
+ less = min_t(unsigned int, sg_dma_len(dst) - offset -
+ dstskip, CHCR_DST_SG_SIZE);
+ dstlen += less;
+ offset += less;
+ if (offset == sg_dma_len(dst)) {
+ dst = sg_next(dst);
+ offset = 0;
+ }
dstsg++;
+ dstskip = 0;
}
src = sg_next(src);
+ srcskip = 0;
}
- *sent = srcsg - minsg;
- *dent = dstsg;
return min(srclen, dstlen);
}
@@ -576,47 +725,35 @@ static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
}
static inline void create_wreq(struct chcr_context *ctx,
struct chcr_wr *chcr_req,
- void *req, struct sk_buff *skb,
- int kctx_len, int hash_sz,
- int is_iv,
+ struct crypto_async_request *req,
+ unsigned int imm,
+ int hash_sz,
+ unsigned int len16,
unsigned int sc_len,
unsigned int lcb)
{
struct uld_ctx *u_ctx = ULD_CTX(ctx);
- int iv_loc = IV_DSGL;
int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
- unsigned int immdatalen = 0, nr_frags = 0;
- if (is_ofld_imm(skb)) {
- immdatalen = skb->data_len;
- iv_loc = IV_IMMEDIATE;
- } else {
- nr_frags = skb_shinfo(skb)->nr_frags;
- }
- chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
- ((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
+ chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
chcr_req->wreq.pld_size_hash_size =
- htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
- FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
+ htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
chcr_req->wreq.len16_pkd =
- htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
- (calc_tx_flits_ofld(skb) * 8), 16)));
+ htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
chcr_req->wreq.rx_chid_to_rx_q_id =
FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
- is_iv ? iv_loc : IV_NOP, !!lcb,
- ctx->tx_qidx);
+ !!lcb, ctx->tx_qidx);
chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
qid);
- chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
- 16) - ((sizeof(chcr_req->wreq)) >> 4)));
+ chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
+ ((sizeof(chcr_req->wreq)) >> 4)));
- chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+ chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
- sizeof(chcr_req->key_ctx) +
- kctx_len + sc_len + immdatalen);
+ sizeof(chcr_req->key_ctx) + sc_len);
}
/**
@@ -629,47 +766,52 @@ static inline void create_wreq(struct chcr_context *ctx,
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
+ struct ulptx_sgl *ulptx;
struct chcr_blkcipher_req_ctx *reqctx =
ablkcipher_request_ctx(wrparam->req);
- struct phys_sge_parm sg_param;
- unsigned int frags = 0, transhdr_len, phys_dsgl;
+ unsigned int temp = 0, transhdr_len, dst_size;
int error;
- unsigned int ivsize = AES_BLOCK_SIZE, kctx_len;
+ int nents;
+ unsigned int kctx_len;
gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
- struct adapter *adap = padap(ctx->dev);
-
- phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
+ struct adapter *adap = padap(c_ctx(tfm)->dev);
+ nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
+ reqctx->dst_ofst);
+ dst_size = get_space_for_phys_dsgl(nents + 1);
kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
- transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
+ CHCR_SRC_SG_SIZE, reqctx->src_ofst);
+ temp = reqctx->imm ? (DIV_ROUND_UP((IV + wrparam->req->nbytes), 16)
+ * 16) : (sgl_len(nents + MIN_CIPHER_SG) * 8);
+ transhdr_len += temp;
+ transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+ skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb) {
error = -ENOMEM;
goto err;
}
- skb_reserve(skb, sizeof(struct sge_opaque_hdr));
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1);
+ FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
- chcr_req->sec_cpl.pldlen = htonl(ivsize + wrparam->bytes);
+ chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
- FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
+ FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
ablkctx->ciph_mode,
- 0, 0, ivsize >> 1);
+ 0, 0, IV >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
- 0, 1, phys_dsgl);
+ 0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
if ((reqctx->op == CHCR_DECRYPT_OP) &&
@@ -694,26 +836,18 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
}
}
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- sg_param.nents = reqctx->dst_nents;
- sg_param.obsize = wrparam->bytes;
- sg_param.qid = wrparam->qid;
- error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
- reqctx->dst, &sg_param);
- if (error)
- goto map_fail1;
+ ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+ chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
+ chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
- skb_set_transport_header(skb, transhdr_len);
- write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
- write_sg_to_skb(skb, &frags, wrparam->srcsg, wrparam->bytes);
atomic_inc(&adap->chcr_stats.cipher_rqst);
- create_wreq(ctx, chcr_req, &(wrparam->req->base), skb, kctx_len, 0, 1,
- sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl,
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len
+ +(reqctx->imm ? (IV + wrparam->bytes) : 0);
+ create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
+ transhdr_len, temp,
ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
reqctx->skb = skb;
- skb_get(skb);
return skb;
-map_fail1:
- kfree_skb(skb);
err:
return ERR_PTR(error);
}
@@ -738,8 +872,7 @@ static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
int err = 0;
crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
@@ -757,8 +890,7 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
unsigned int ck_size, context_size;
u16 alignment = 0;
int err;
@@ -790,8 +922,7 @@ static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
unsigned int ck_size, context_size;
u16 alignment = 0;
int err;
@@ -822,8 +953,7 @@ static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
unsigned int ck_size, context_size;
u16 alignment = 0;
int err;
@@ -887,31 +1017,36 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
return bytes;
}
-static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
+static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
+ u32 isfinal)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct crypto_cipher *cipher;
int ret, i;
u8 *key;
unsigned int keylen;
+ int round = reqctx->last_req_len / AES_BLOCK_SIZE;
+ int round8 = round / 8;
cipher = ablkctx->aes_generic;
- memcpy(iv, req->info, AES_BLOCK_SIZE);
+ memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
keylen = ablkctx->enckey_len / 2;
key = ablkctx->key + keylen;
ret = crypto_cipher_setkey(cipher, key, keylen);
if (ret)
goto out;
+ /* H/W sends the encrypted IV in dsgl when AADIVDROP bit is 0 */
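+ /* Advance the tweak by one GF(2^128) multiplication by x per
+  * AES block processed in the last request, eight blocks at a time.
+  */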
+ for (i = 0; i < round8; i++)
+ gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
- crypto_cipher_encrypt_one(cipher, iv, iv);
- for (i = 0; i < (reqctx->processed / AES_BLOCK_SIZE); i++)
+ for (i = 0; i < (round % 8); i++)
gf128mul_x_ble((le128 *)iv, (le128 *)iv);
- crypto_cipher_decrypt_one(cipher, iv, iv);
+ if (!isfinal)
+ crypto_cipher_decrypt_one(cipher, iv, iv);
out:
return ret;
}
@@ -932,7 +1067,7 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req,
CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
AES_BLOCK_SIZE) + 1);
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
- ret = chcr_update_tweak(req, iv);
+ ret = chcr_update_tweak(req, iv, 0);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
if (reqctx->op)
sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
@@ -963,7 +1098,7 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
ctr_add_iv(iv, req->info, (reqctx->processed /
AES_BLOCK_SIZE));
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
- ret = chcr_update_tweak(req, iv);
+ ret = chcr_update_tweak(req, iv, 1);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
if (reqctx->op)
sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
@@ -977,70 +1112,64 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
}
-
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
unsigned char *input, int err)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct sk_buff *skb;
struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct cipher_wr_param wrparam;
int bytes;
- dma_unmap_sg(&u_ctx->lldi.pdev->dev, reqctx->dst, reqctx->dst_nents,
- DMA_FROM_DEVICE);
-
- if (reqctx->skb) {
- kfree_skb(reqctx->skb);
- reqctx->skb = NULL;
- }
if (err)
- goto complete;
-
+ goto unmap;
if (req->nbytes == reqctx->processed) {
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+ req);
err = chcr_final_cipher_iv(req, fw6_pld, req->info);
goto complete;
}
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ c_ctx(tfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
err = -EBUSY;
- goto complete;
+ goto unmap;
}
}
- wrparam.srcsg = scatterwalk_ffwd(reqctx->srcffwd, req->src,
- reqctx->processed);
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, reqctx->dstsg,
- reqctx->processed);
- if (!wrparam.srcsg || !reqctx->dst) {
- pr_err("Input sg list length less that nbytes\n");
- err = -EINVAL;
- goto complete;
- }
- bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dst, 1,
- SPACE_LEFT(ablkctx->enckey_len),
- &wrparam.snent, &reqctx->dst_nents);
- if ((bytes + reqctx->processed) >= req->nbytes)
+ if (!reqctx->imm) {
+ bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
+ SPACE_LEFT(ablkctx->enckey_len),
+ reqctx->src_ofst, reqctx->dst_ofst);
+ if ((bytes + reqctx->processed) >= req->nbytes)
+ bytes = req->nbytes - reqctx->processed;
+ else
+ bytes = ROUND_16(bytes);
+ } else {
+ /* CTR mode counter overflow */
bytes = req->nbytes - reqctx->processed;
- else
- bytes = ROUND_16(bytes);
+ }
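+ /* The IV buffer stays DMA mapped; sync it to the CPU before the
+  * update and back to the device afterwards.
+  */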
+ dma_sync_single_for_cpu(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+ reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
+ dma_sync_single_for_device(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+ reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
if (err)
- goto complete;
+ goto unmap;
if (unlikely(bytes == 0)) {
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+ req);
err = chcr_cipher_fallback(ablkctx->sw_cipher,
req->base.flags,
- wrparam.srcsg,
- reqctx->dst,
- req->nbytes - reqctx->processed,
- reqctx->iv,
+ req->src,
+ req->dst,
+ req->nbytes,
+ req->info,
reqctx->op);
goto complete;
}
@@ -1048,23 +1177,24 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
CRYPTO_ALG_SUB_TYPE_CTR)
bytes = adjust_ctr_overflow(reqctx->iv, bytes);
- reqctx->processed += bytes;
- wrparam.qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
+ wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
wrparam.req = req;
wrparam.bytes = bytes;
skb = create_cipher_wr(&wrparam);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
err = PTR_ERR(skb);
- goto complete;
+ goto unmap;
}
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
+ reqctx->last_req_len = bytes;
+ reqctx->processed += bytes;
return 0;
+unmap:
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
req->base.complete(&req->base, err);
return err;
}
@@ -1077,12 +1207,10 @@ static int process_cipher(struct ablkcipher_request *req,
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct cipher_wr_param wrparam;
- int bytes, nents, err = -EINVAL;
+ int bytes, err = -EINVAL;
- reqctx->newdstsg = NULL;
reqctx->processed = 0;
if (!req->info)
goto error;
@@ -1093,27 +1221,43 @@ static int process_cipher(struct ablkcipher_request *req,
ablkctx->enckey_len, req->nbytes, ivsize);
goto error;
}
- wrparam.srcsg = req->src;
- if (is_newsg(req->dst, &nents)) {
- reqctx->newdstsg = alloc_new_sg(req->dst, nents);
- if (IS_ERR(reqctx->newdstsg))
- return PTR_ERR(reqctx->newdstsg);
- reqctx->dstsg = reqctx->newdstsg;
+ chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+ if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
+ AES_MIN_KEY_SIZE +
+ sizeof(struct cpl_rx_phys_dsgl) +
+ /* Min dsgl size */
+ 32))) {
+ /* Can be sent as Imm */
+ unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
+
+ dnents = sg_nents_xlen(req->dst, req->nbytes,
+ CHCR_DST_SG_SIZE, 0);
+ dnents += 1; // IV
+ phys_dsgl = get_space_for_phys_dsgl(dnents);
+ kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
+ reqctx->imm = (transhdr_len + IV + req->nbytes) <=
+ SGE_MAX_WR_LEN;
+ bytes = IV + req->nbytes;
+
} else {
- reqctx->dstsg = req->dst;
+ reqctx->imm = 0;
}
- bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG,
- SPACE_LEFT(ablkctx->enckey_len),
- &wrparam.snent,
- &reqctx->dst_nents);
- if ((bytes + reqctx->processed) >= req->nbytes)
- bytes = req->nbytes - reqctx->processed;
- else
- bytes = ROUND_16(bytes);
- if (unlikely(bytes > req->nbytes))
+
+ if (!reqctx->imm) {
+ bytes = chcr_sg_ent_in_wr(req->src, req->dst,
+ MIN_CIPHER_SG,
+ SPACE_LEFT(ablkctx->enckey_len),
+ 0, 0);
+ if ((bytes + reqctx->processed) >= req->nbytes)
+ bytes = req->nbytes - reqctx->processed;
+ else
+ bytes = ROUND_16(bytes);
+ } else {
bytes = req->nbytes;
+ }
if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
- CRYPTO_ALG_SUB_TYPE_CTR) {
+ CRYPTO_ALG_SUB_TYPE_CTR) {
bytes = adjust_ctr_overflow(req->info, bytes);
}
if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
@@ -1128,9 +1272,11 @@ static int process_cipher(struct ablkcipher_request *req,
} else {
- memcpy(reqctx->iv, req->info, ivsize);
+ memcpy(reqctx->iv, req->info, IV);
}
if (unlikely(bytes == 0)) {
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+ req);
err = chcr_cipher_fallback(ablkctx->sw_cipher,
req->base.flags,
req->src,
@@ -1140,45 +1286,48 @@ static int process_cipher(struct ablkcipher_request *req,
op_type);
goto error;
}
- reqctx->processed = bytes;
- reqctx->dst = reqctx->dstsg;
reqctx->op = op_type;
+ reqctx->srcsg = req->src;
+ reqctx->dstsg = req->dst;
+ reqctx->src_ofst = 0;
+ reqctx->dst_ofst = 0;
wrparam.qid = qid;
wrparam.req = req;
wrparam.bytes = bytes;
*skb = create_cipher_wr(&wrparam);
if (IS_ERR(*skb)) {
err = PTR_ERR(*skb);
- goto error;
+ goto unmap;
}
+ reqctx->processed = bytes;
+ reqctx->last_req_len = bytes;
return 0;
+unmap:
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
return err;
}
static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct sk_buff *skb = NULL;
int err;
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ c_ctx(tfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
- err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
- CHCR_ENCRYPT_OP);
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+ &skb, CHCR_ENCRYPT_OP);
if (err || !skb)
return err;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@@ -1186,23 +1335,22 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
struct sk_buff *skb = NULL;
int err;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ c_ctx(tfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
- err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
- CHCR_DECRYPT_OP);
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+ &skb, CHCR_DECRYPT_OP);
if (err || !skb)
return err;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@@ -1350,17 +1498,19 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+ struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
struct sk_buff *skb = NULL;
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
struct chcr_wr *chcr_req;
- unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
+ struct ulptx_sgl *ulptx;
+ unsigned int nents = 0, transhdr_len, iopad_alignment = 0;
unsigned int digestsize = crypto_ahash_digestsize(tfm);
- unsigned int kctx_len = 0;
+ unsigned int kctx_len = 0, temp = 0;
u8 hash_size_in_response = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
- struct adapter *adap = padap(ctx->dev);
+ struct adapter *adap = padap(h_ctx(tfm)->dev);
+ int error = 0;
iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
kctx_len = param->alg_prm.result_size + iopad_alignment;
@@ -1372,15 +1522,22 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
else
hash_size_in_response = param->alg_prm.result_size;
transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
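+ /* Payloads that fit in a single WR are sent as immediate data;
+  * anything larger is described by an SGL instead.
+  */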
+ req_ctx->imm = (transhdr_len + param->bfr_len + param->sg_len) <=
+ SGE_MAX_WR_LEN;
+ nents = sg_nents_xlen(req->src, param->sg_len, CHCR_SRC_SG_SIZE, 0);
+ nents += param->bfr_len ? 1 : 0;
+ transhdr_len += req_ctx->imm ? (DIV_ROUND_UP((param->bfr_len +
+ param->sg_len), 16) * 16) :
+ (sgl_len(nents) * 8);
+ transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+
+ skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb)
- return skb;
-
- skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+ return ERR_PTR(-ENOMEM);
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 0);
+ FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -1409,37 +1566,52 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
((kctx_len +
sizeof(chcr_req->key_ctx)) >> 4));
chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
-
- skb_set_transport_header(skb, transhdr_len);
- if (param->bfr_len != 0)
- write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
- param->bfr_len);
- if (param->sg_len != 0)
- write_sg_to_skb(skb, &frags, req->src, param->sg_len);
+ ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + kctx_len +
+ DUMMY_BYTES);
+ if (param->bfr_len != 0) {
+ req_ctx->dma_addr = dma_map_single(&u_ctx->lldi.pdev->dev,
+ req_ctx->reqbfr, param->bfr_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
+ req_ctx->dma_addr)) {
+ error = -ENOMEM;
+ goto err;
+ }
+ req_ctx->dma_len = param->bfr_len;
+ } else {
+ req_ctx->dma_addr = 0;
+ }
+ chcr_add_hash_src_ent(req, ulptx, param);
+ /* Request up to the max WR size */
+ temp = kctx_len + DUMMY_BYTES + (req_ctx->imm ? (param->sg_len
+ + param->bfr_len) : 0);
atomic_inc(&adap->chcr_stats.digest_rqst);
- create_wreq(ctx, chcr_req, &req->base, skb, kctx_len,
- hash_size_in_response, 0, DUMMY_BYTES, 0);
+ create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->imm,
+ hash_size_in_response, transhdr_len,
+ temp, 0);
req_ctx->skb = skb;
- skb_get(skb);
return skb;
+err:
+ kfree_skb(skb);
+ return ERR_PTR(error);
}
static int chcr_ahash_update(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
u8 remainder = 0, bs;
unsigned int nbytes = req->nbytes;
struct hash_wr_param params;
+ int error;
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(ctx);
+ u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ h_ctx(rtfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
@@ -1453,7 +1625,9 @@ static int chcr_ahash_update(struct ahash_request *req)
req_ctx->reqlen += nbytes;
return 0;
}
-
+ error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+ if (error)
+ return -ENOMEM;
params.opad_needed = 0;
params.more = 1;
params.last = 0;
@@ -1464,25 +1638,27 @@ static int chcr_ahash_update(struct ahash_request *req)
req_ctx->result = 0;
req_ctx->data_len += params.sg_len + params.bfr_len;
skb = create_hash_wr(req, &params);
- if (!skb)
- return -ENOMEM;
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto unmap;
+ }
if (remainder) {
- u8 *temp;
/* Swap buffers */
- temp = req_ctx->reqbfr;
- req_ctx->reqbfr = req_ctx->skbfr;
- req_ctx->skbfr = temp;
+ swap(req_ctx->reqbfr, req_ctx->skbfr);
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
req_ctx->reqbfr, remainder, req->nbytes -
remainder);
}
req_ctx->reqlen = remainder;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
+unmap:
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+ return error;
}
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
@@ -1499,13 +1675,12 @@ static int chcr_ahash_final(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct hash_wr_param params;
struct sk_buff *skb;
struct uld_ctx *u_ctx = NULL;
u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(ctx);
+ u_ctx = ULD_CTX(h_ctx(rtfm));
if (is_hmac(crypto_ahash_tfm(rtfm)))
params.opad_needed = 1;
else
@@ -1528,11 +1703,11 @@ static int chcr_ahash_final(struct ahash_request *req)
params.more = 0;
}
skb = create_hash_wr(req, &params);
- if (!skb)
- return -ENOMEM;
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@@ -1541,17 +1716,17 @@ static int chcr_ahash_finup(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
struct hash_wr_param params;
u8 bs;
+ int error;
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(ctx);
+ u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ h_ctx(rtfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
@@ -1577,34 +1752,41 @@ static int chcr_ahash_finup(struct ahash_request *req)
params.last = 1;
params.more = 0;
}
-
- skb = create_hash_wr(req, &params);
- if (!skb)
+ error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+ if (error)
return -ENOMEM;
+ skb = create_hash_wr(req, &params);
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto unmap;
+ }
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
+unmap:
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+ return error;
}
static int chcr_ahash_digest(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
struct hash_wr_param params;
u8 bs;
+ int error;
rtfm->init(req);
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(ctx);
+ u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ h_ctx(rtfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
@@ -1613,6 +1795,9 @@ static int chcr_ahash_digest(struct ahash_request *req)
params.opad_needed = 1;
else
params.opad_needed = 0;
+ error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+ if (error)
+ return -ENOMEM;
params.last = 0;
params.more = 0;
@@ -1630,13 +1815,17 @@ static int chcr_ahash_digest(struct ahash_request *req)
}
skb = create_hash_wr(req, &params);
- if (!skb)
- return -ENOMEM;
-
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto unmap;
+ }
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
+unmap:
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+ return error;
}
static int chcr_ahash_export(struct ahash_request *areq, void *out)
@@ -1646,6 +1835,8 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
state->reqlen = req_ctx->reqlen;
state->data_len = req_ctx->data_len;
+ state->is_sg_map = 0;
+ state->result = 0;
memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
memcpy(state->partial_hash, req_ctx->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
@@ -1661,6 +1852,8 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
req_ctx->data_len = state->data_len;
req_ctx->reqbfr = req_ctx->bfr1;
req_ctx->skbfr = req_ctx->bfr2;
+ req_ctx->is_sg_map = 0;
+ req_ctx->result = 0;
memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
memcpy(req_ctx->partial_hash, state->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
@@ -1670,8 +1863,7 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+ struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
unsigned int digestsize = crypto_ahash_digestsize(tfm);
unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int i, err = 0, updated_digestsize;
@@ -1724,8 +1916,7 @@ out:
static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
unsigned int key_len)
{
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
unsigned short context_size = 0;
int err;
@@ -1764,6 +1955,7 @@ static int chcr_sha_init(struct ahash_request *areq)
req_ctx->skbfr = req_ctx->bfr2;
req_ctx->skb = NULL;
req_ctx->result = 0;
+ req_ctx->is_sg_map = 0;
copy_hash_init_values(req_ctx->partial_hash, digestsize);
return 0;
}
@@ -1779,8 +1971,7 @@ static int chcr_hmac_init(struct ahash_request *areq)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+ struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
unsigned int digestsize = crypto_ahash_digestsize(rtfm);
unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
@@ -1826,86 +2017,45 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
}
}
-static int is_newsg(struct scatterlist *sgl, unsigned int *newents)
+static int chcr_aead_common_init(struct aead_request *req,
+ unsigned short op_type)
{
- int nents = 0;
- int ret = 0;
-
- while (sgl) {
- if (sgl->length > CHCR_SG_SIZE)
- ret = 1;
- nents += DIV_ROUND_UP(sgl->length, CHCR_SG_SIZE);
- sgl = sg_next(sgl);
- }
- *newents = nents;
- return ret;
-}
-
-static inline void free_new_sg(struct scatterlist *sgl)
-{
- kfree(sgl);
-}
-
-static struct scatterlist *alloc_new_sg(struct scatterlist *sgl,
- unsigned int nents)
-{
- struct scatterlist *newsg, *sg;
- int i, len, processed = 0;
- struct page *spage;
- int offset;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ int error = -EINVAL;
+ unsigned int authsize = crypto_aead_authsize(tfm);
- newsg = kmalloc_array(nents, sizeof(struct scatterlist), GFP_KERNEL);
- if (!newsg)
- return ERR_PTR(-ENOMEM);
- sg = newsg;
- sg_init_table(sg, nents);
- offset = sgl->offset;
- spage = sg_page(sgl);
- for (i = 0; i < nents; i++) {
- len = min_t(u32, sgl->length - processed, CHCR_SG_SIZE);
- sg_set_page(sg, spage, len, offset);
- processed += len;
- offset += len;
- if (offset >= PAGE_SIZE) {
- offset = offset % PAGE_SIZE;
- spage++;
- }
- if (processed == sgl->length) {
- processed = 0;
- sgl = sg_next(sgl);
- if (!sgl)
- break;
- spage = sg_page(sgl);
- offset = sgl->offset;
- }
- sg = sg_next(sg);
+ /* validate key size */
+ if (aeadctx->enckey_len == 0)
+ goto err;
+ if (op_type && req->cryptlen < authsize)
+ goto err;
+ error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+ op_type);
+ if (error) {
+ error = -ENOMEM;
+ goto err;
}
- return newsg;
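+ /* Count source SG entries separately for the AAD region and the payload that follows it */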
+ reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
+ CHCR_SRC_SG_SIZE, 0);
+ reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
+ CHCR_SRC_SG_SIZE, req->assoclen);
+ return 0;
+err:
+ return error;
}
-static int chcr_copy_assoc(struct aead_request *req,
- struct chcr_aead_ctx *ctx)
-{
- SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
-
- skcipher_request_set_tfm(skreq, ctx->null);
- skcipher_request_set_callback(skreq, aead_request_flags(req),
- NULL, NULL);
- skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
- NULL);
-
- return crypto_skcipher_encrypt(skreq);
-}
-static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
+static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
int aadmax, int wrlen,
unsigned short op_type)
{
unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
+ dst_nents > MAX_DSGL_ENT ||
(req->assoclen > aadmax) ||
- (src_nent > MAX_SKB_FRAGS) ||
- (wrlen > MAX_WR_SIZE))
+ (wrlen > SGE_MAX_WR_LEN))
return 1;
return 0;
}
@@ -1913,8 +2063,7 @@ static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct aead_request *subreq = aead_request_ctx(req);
aead_request_set_tfm(subreq, aeadctx->sw_cipher);
@@ -1933,96 +2082,70 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
unsigned short op_type)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
- struct phys_sge_parm sg_param;
- struct scatterlist *src;
- unsigned int frags = 0, transhdr_len;
- unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
- unsigned int kctx_len = 0, nents;
- unsigned short stop_offset = 0;
+ struct ulptx_sgl *ulptx;
+ unsigned int transhdr_len;
+ unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
+ unsigned int kctx_len = 0, dnents;
unsigned int assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
- int error = -EINVAL, src_nent;
+ int error = -EINVAL;
int null = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
- struct adapter *adap = padap(ctx->dev);
+ struct adapter *adap = padap(a_ctx(tfm)->dev);
- reqctx->newdstsg = NULL;
- dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
- authsize);
- if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0))
- goto err;
+ if (req->cryptlen == 0)
+ return NULL;
- if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
- goto err;
- src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
- if (src_nent < 0)
- goto err;
- src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
-
- if (req->src != req->dst) {
- error = chcr_copy_assoc(req, aeadctx);
- if (error)
- return ERR_PTR(error);
- }
- if (dst_size && is_newsg(req->dst, &nents)) {
- reqctx->newdstsg = alloc_new_sg(req->dst, nents);
- if (IS_ERR(reqctx->newdstsg))
- return ERR_CAST(reqctx->newdstsg);
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
- reqctx->newdstsg, req->assoclen);
- } else {
- if (req->src == req->dst)
- reqctx->dst = src;
- else
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
- req->dst, req->assoclen);
- }
- if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
+ reqctx->b0_dma = 0;
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
null = 1;
assoclen = 0;
}
- reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
- (op_type ? -authsize : authsize));
- if (reqctx->dst_nents < 0) {
- pr_err("AUTHENC:Invalid Destination sg entries\n");
- error = -EINVAL;
- goto err;
- }
- dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ error = chcr_aead_common_init(req, op_type);
+ if (error)
+ return ERR_PTR(error);
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen +
+ (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
+ req->assoclen);
+ dnents += MIN_AUTH_SG; // For IV
+
+ dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
- sizeof(chcr_req->key_ctx);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- if (chcr_aead_need_fallback(req, src_nent + MIN_AUTH_SG,
- T6_MAX_AAD_SIZE,
- transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
- op_type)) {
+ reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
+ SGE_MAX_WR_LEN;
+ temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen), 16)
+ * 16) : (sgl_len(reqctx->src_nents + reqctx->aad_nents
+ + MIN_GCM_SG) * 8);
+ transhdr_len += temp;
+ transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+
+ if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
+ transhdr_len, op_type)) {
atomic_inc(&adap->chcr_stats.fallback);
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
+ chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+ op_type);
return ERR_PTR(chcr_aead_fallback(req, op_type));
}
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb) {
error = -ENOMEM;
goto err;
}
- /* LLD is going to write the sge hdr. */
- skb_reserve(skb, sizeof(struct sge_opaque_hdr));
-
- /* Write WR */
chcr_req = __skb_put_zero(skb, transhdr_len);
- stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+ temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
/*
* Input order is AAD,IV and Payload. where IV should be included as
@@ -2030,27 +2153,34 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
* to the hardware spec
*/
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2,
- (ivsize ? (assoclen + 1) : 0));
- chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
+ FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
+ assoclen + 1);
+ chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
assoclen ? 1 : 0, assoclen,
- assoclen + ivsize + 1,
- (stop_offset & 0x1F0) >> 4);
+ assoclen + IV + 1,
+ (temp & 0x1F0) >> 4);
chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
- stop_offset & 0xF,
- null ? 0 : assoclen + ivsize + 1,
- stop_offset, stop_offset);
+ temp & 0xF,
+ null ? 0 : assoclen + IV + 1,
+ temp, temp);
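+ /* The CTR sub-types run the cipher in AES-CTR mode; the others use AES-CBC */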
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
+ temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
+ else
+ temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
- CHCR_SCMD_CIPHER_MODE_AES_CBC,
+ temp,
actx->auth_mode, aeadctx->hmac_ctrl,
- ivsize >> 1);
+ IV >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
- 0, 1, dst_size);
+ 0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
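+ /* CTR decryption still programs the encryption key, since CTR only
+  * ever encrypts the counter.
+  */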
- if (op_type == CHCR_ENCRYPT_OP)
+ if (op_type == CHCR_ENCRYPT_OP ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
memcpy(chcr_req->key_ctx.key, aeadctx->key,
aeadctx->enckey_len);
else
@@ -2060,41 +2190,318 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
4), actx->h_iopad, kctx_len -
(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
-
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- sg_param.nents = reqctx->dst_nents;
- sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
- sg_param.qid = qid;
- error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
- reqctx->dst, &sg_param);
- if (error)
- goto dstmap_fail;
-
- skb_set_transport_header(skb, transhdr_len);
-
- if (assoclen) {
- /* AAD buffer in */
- write_sg_to_skb(skb, &frags, req->src, assoclen);
-
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
+ memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
+ CTR_RFC3686_IV_SIZE);
+ *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
+ CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+ } else {
+ memcpy(reqctx->iv, req->iv, IV);
}
- write_buffer_to_skb(skb, &frags, req->iv, ivsize);
- write_sg_to_skb(skb, &frags, src, req->cryptlen);
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+ chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
+ chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
atomic_inc(&adap->chcr_stats.cipher_rqst);
- create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
- sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
+ kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+ create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
+ transhdr_len, temp, 0);
reqctx->skb = skb;
- skb_get(skb);
+ reqctx->op = op_type;
return skb;
-dstmap_fail:
- /* ivmap_fail: */
- kfree_skb(skb);
err:
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
+ chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+ op_type);
+
return ERR_PTR(error);
}
+int chcr_aead_dma_map(struct device *dev,
+ struct aead_request *req,
+ unsigned short op_type)
+{
+ int error;
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int dst_size;
+
+ dst_size = req->assoclen + req->cryptlen + (op_type ?
+ -authsize : authsize);
+ if (!req->cryptlen || !dst_size)
+ return 0;
+ reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, reqctx->iv_dma))
+ return -ENOMEM;
+
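+ /* A shared src/dst list is mapped once, bidirectionally. Note that
+  * dma_map_sg() returns the number of mapped entries, so 0 means failure.
+  */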
+ if (req->src == req->dst) {
+ error = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_BIDIRECTIONAL);
+ if (!error)
+ goto err;
+ } else {
+ error = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ if (!error)
+ goto err;
+ error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
+ DMA_FROM_DEVICE);
+ if (!error) {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
+ return -ENOMEM;
+}
+
+void chcr_aead_dma_unmap(struct device *dev,
+ struct aead_request *req,
+ unsigned short op_type)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int dst_size;
+
+ dst_size = req->assoclen + req->cryptlen + (op_type ?
+ -authsize : authsize);
+ if (!req->cryptlen || !dst_size)
+ return;
+
+ dma_unmap_single(dev, reqctx->iv_dma, IV,
+ DMA_BIDIRECTIONAL);
+ if (req->src == req->dst) {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
+ DMA_FROM_DEVICE);
+ }
+}
+
+void chcr_add_aead_src_ent(struct aead_request *req,
+ struct ulptx_sgl *ulptx,
+ unsigned int assoclen,
+ unsigned short op_type)
+{
+ struct ulptx_walk ulp_walk;
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+
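+ /* Immediate requests copy B0 (if present), AAD, IV and payload inline
+  * into the WR; otherwise an ULPTX SGL is built over the DMA-mapped
+  * buffers in the same order.
+  */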
+ if (reqctx->imm) {
+ u8 *buf = (u8 *)ulptx;
+
+ if (reqctx->b0_dma) {
+ memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
+ buf += reqctx->b0_len;
+ }
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ buf, assoclen, 0);
+ buf += assoclen;
+ memcpy(buf, reqctx->iv, IV);
+ buf += IV;
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ buf, req->cryptlen, req->assoclen);
+ } else {
+ ulptx_walk_init(&ulp_walk, ulptx);
+ if (reqctx->b0_dma)
+ ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
+ &reqctx->b0_dma);
+ ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
+ ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
+ ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
+ req->assoclen);
+ ulptx_walk_end(&ulp_walk);
+ }
+}
+
+void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ unsigned int assoclen,
+ unsigned short op_type,
+ unsigned short qid)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct dsgl_walk dsgl_walk;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ u32 temp;
+
+ dsgl_walk_init(&dsgl_walk, phys_cpl);
+ if (reqctx->b0_dma)
+ dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
+ dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
+ dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
+ temp = req->cryptlen + (op_type ? -authsize : authsize);
+ dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
+ dsgl_walk_end(&dsgl_walk, qid);
+}
+
+void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+ struct ulptx_sgl *ulptx,
+ struct cipher_wr_param *wrparam)
+{
+ struct ulptx_walk ulp_walk;
+ struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+
+ if (reqctx->imm) {
+ u8 *buf = (u8 *)ulptx;
+
+ memcpy(buf, reqctx->iv, IV);
+ buf += IV;
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ buf, wrparam->bytes, reqctx->processed);
+ } else {
+ ulptx_walk_init(&ulp_walk, ulptx);
+ ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
+ ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
+ reqctx->src_ofst);
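+ /* Remember where the walk stopped so the next chunk resumes there */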
+ reqctx->srcsg = ulp_walk.last_sg;
+ reqctx->src_ofst = ulp_walk.last_sg_len;
+ ulptx_walk_end(&ulp_walk);
+ }
+}
+
+void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct cipher_wr_param *wrparam,
+ unsigned short qid)
+{
+ struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct dsgl_walk dsgl_walk;
+
+ dsgl_walk_init(&dsgl_walk, phys_cpl);
+ dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
+ dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
+ reqctx->dst_ofst);
+ reqctx->dstsg = dsgl_walk.last_sg;
+ reqctx->dst_ofst = dsgl_walk.last_sg_len;
+
+ dsgl_walk_end(&dsgl_walk, qid);
+}
+
+void chcr_add_hash_src_ent(struct ahash_request *req,
+ struct ulptx_sgl *ulptx,
+ struct hash_wr_param *param)
+{
+ struct ulptx_walk ulp_walk;
+ struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+
+ if (reqctx->imm) {
+ u8 *buf = (u8 *)ulptx;
+
+ if (param->bfr_len) {
+ memcpy(buf, reqctx->reqbfr, param->bfr_len);
+ buf += param->bfr_len;
+ }
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ buf, param->sg_len, 0);
+ } else {
+ ulptx_walk_init(&ulp_walk, ulptx);
+ if (param->bfr_len)
+ ulptx_walk_add_page(&ulp_walk, param->bfr_len,
+ &reqctx->dma_addr);
+ ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len,
+ 0);
+ ulptx_walk_end(&ulp_walk);
+ }
+}
+
+int chcr_hash_dma_map(struct device *dev,
+ struct ahash_request *req)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+ int error = 0;
+
+ if (!req->nbytes)
+ return 0;
+ error = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ if (!error)
+ return -ENOMEM;
+ req_ctx->is_sg_map = 1;
+ return 0;
+}
+
+void chcr_hash_dma_unmap(struct device *dev,
+ struct ahash_request *req)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+
+ if (!req->nbytes)
+ return;
+
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ req_ctx->is_sg_map = 0;
+
+}
+
+int chcr_cipher_dma_map(struct device *dev,
+ struct ablkcipher_request *req)
+{
+ int error;
+ struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+
+ reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, reqctx->iv_dma))
+ return -ENOMEM;
+
+ if (req->src == req->dst) {
+ error = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_BIDIRECTIONAL);
+ if (!error)
+ goto err;
+ } else {
+ error = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ if (!error)
+ goto err;
+ error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
+ DMA_FROM_DEVICE);
+ if (!error) {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
+ return -ENOMEM;
+}
+
+void chcr_cipher_dma_unmap(struct device *dev,
+ struct ablkcipher_request *req)
+{
+ struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+
+ dma_unmap_single(dev, reqctx->iv_dma, IV,
+ DMA_BIDIRECTIONAL);
+ if (req->src == req->dst) {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
+ DMA_FROM_DEVICE);
+ }
+}
+
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
__be32 data;
@@ -2179,15 +2586,13 @@ static int ccm_format_packet(struct aead_request *req,
static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
unsigned int dst_size,
struct aead_request *req,
- unsigned short op_type,
- struct chcr_context *chcrctx)
+ unsigned short op_type)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
- unsigned int ivsize = AES_BLOCK_SIZE;
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
- unsigned int c_id = chcrctx->dev->rx_channel_id;
+ unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
unsigned int ccm_xtra;
unsigned char tag_offset = 0, auth_offset = 0;
unsigned int assoclen;
@@ -2200,7 +2605,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
auth_offset = req->cryptlen ?
- (assoclen + ivsize + 1 + ccm_xtra) : 0;
+ (assoclen + IV + 1 + ccm_xtra) : 0;
if (op_type == CHCR_DECRYPT_OP) {
if (crypto_aead_authsize(tfm) != req->cryptlen)
tag_offset = crypto_aead_authsize(tfm);
@@ -2210,14 +2615,13 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
- 2, (ivsize ? (assoclen + 1) : 0) +
- ccm_xtra);
+ 2, assoclen + 1 + ccm_xtra);
sec_cpl->pldlen =
- htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
+ htonl(assoclen + IV + req->cryptlen + ccm_xtra);
/* For CCM there wil be b0 always. So AAD start will be 1 always */
sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1, assoclen + ccm_xtra, assoclen
- + ivsize + 1 + ccm_xtra, 0);
+ + IV + 1 + ccm_xtra, 0);
sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
auth_offset, tag_offset,
@@ -2226,10 +2630,10 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
cipher_mode, mac_mode,
- aeadctx->hmac_ctrl, ivsize >> 1);
+ aeadctx->hmac_ctrl, IV >> 1);
sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
- 1, dst_size);
+ 0, dst_size);
}
int aead_ccm_validate_input(unsigned short op_type,
@@ -2249,131 +2653,77 @@ int aead_ccm_validate_input(unsigned short op_type,
return -EINVAL;
}
}
- if (aeadctx->enckey_len == 0) {
- pr_err("CCM: Encryption key not set\n");
- return -EINVAL;
- }
return 0;
}
-unsigned int fill_aead_req_fields(struct sk_buff *skb,
- struct aead_request *req,
- struct scatterlist *src,
- unsigned int ivsize,
- struct chcr_aead_ctx *aeadctx)
-{
- unsigned int frags = 0;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- /* b0 and aad length(if available) */
-
- write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
- (req->assoclen ? CCM_AAD_FIELD_SIZE : 0));
- if (req->assoclen) {
- if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
- write_sg_to_skb(skb, &frags, req->src,
- req->assoclen - 8);
- else
- write_sg_to_skb(skb, &frags, req->src, req->assoclen);
- }
- write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
- if (req->cryptlen)
- write_sg_to_skb(skb, &frags, src, req->cryptlen);
-
- return frags;
-}
-
static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
unsigned short qid,
int size,
unsigned short op_type)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
- struct phys_sge_parm sg_param;
- struct scatterlist *src;
- unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
- unsigned int dst_size = 0, kctx_len, nents;
- unsigned int sub_type;
+ struct ulptx_sgl *ulptx;
+ unsigned int transhdr_len;
+ unsigned int dst_size = 0, kctx_len, dnents, temp;
+ unsigned int sub_type, assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
- int error = -EINVAL, src_nent;
+ int error = -EINVAL;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
- struct adapter *adap = padap(ctx->dev);
-
- dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
- authsize);
- reqctx->newdstsg = NULL;
- if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
- goto err;
- src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
- if (src_nent < 0)
- goto err;
+ struct adapter *adap = padap(a_ctx(tfm)->dev);
+ reqctx->b0_dma = 0;
sub_type = get_aead_subtype(tfm);
- src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
- if (req->src != req->dst) {
- error = chcr_copy_assoc(req, aeadctx);
- if (error) {
- pr_err("AAD copy to destination buffer fails\n");
- return ERR_PTR(error);
- }
- }
- if (dst_size && is_newsg(req->dst, &nents)) {
- reqctx->newdstsg = alloc_new_sg(req->dst, nents);
- if (IS_ERR(reqctx->newdstsg))
- return ERR_CAST(reqctx->newdstsg);
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
- reqctx->newdstsg, req->assoclen);
- } else {
- if (req->src == req->dst)
- reqctx->dst = src;
- else
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
- req->dst, req->assoclen);
- }
- reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
- (op_type ? -authsize : authsize));
- if (reqctx->dst_nents < 0) {
- pr_err("CCM:Invalid Destination sg entries\n");
- error = -EINVAL;
- goto err;
- }
+ if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+ assoclen -= 8;
+ error = chcr_aead_common_init(req, op_type);
+ if (error)
+ return ERR_PTR(error);
+
+ reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
if (error)
goto err;
-
- dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen
+ + (op_type ? -authsize : authsize),
+ CHCR_DST_SG_SIZE, req->assoclen);
+ dnents += MIN_CCM_SG; // For IV and B0
+ dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG,
- T6_MAX_AAD_SIZE - 18,
- transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
- op_type)) {
+ reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
+ reqctx->b0_len) <= SGE_MAX_WR_LEN;
+ temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen +
+ reqctx->b0_len), 16) * 16) :
+ (sgl_len(reqctx->src_nents + reqctx->aad_nents +
+ MIN_CCM_SG) * 8);
+ transhdr_len += temp;
+ transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+
+ if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
+ reqctx->b0_len, transhdr_len, op_type)) {
atomic_inc(&adap->chcr_stats.fallback);
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
+ chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+ op_type);
return ERR_PTR(chcr_aead_fallback(req, op_type));
}
-
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb) {
error = -ENOMEM;
goto err;
}
- skb_reserve(skb, sizeof(struct sge_opaque_hdr));
-
- chcr_req = __skb_put_zero(skb, transhdr_len);
+ chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
- fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
+ fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
@@ -2381,31 +2731,37 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
16), aeadctx->key, aeadctx->enckey_len);
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
error = ccm_format_packet(req, aeadctx, sub_type, op_type);
if (error)
goto dstmap_fail;
- sg_param.nents = reqctx->dst_nents;
- sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
- sg_param.qid = qid;
- error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
- reqctx->dst, &sg_param);
- if (error)
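+ /* ccm_format_packet() built B0 (and the AAD length field) in
+  * scratch_pad; map it so the SGLs can reference it.
+  */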
+ reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
+ &reqctx->scratch_pad, reqctx->b0_len,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
+ reqctx->b0_dma)) {
+ error = -ENOMEM;
goto dstmap_fail;
+ }
+
+ chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
+ chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
- skb_set_transport_header(skb, transhdr_len);
- frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
atomic_inc(&adap->chcr_stats.aead_rqst);
- create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, 0, 1,
- sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
+ kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
+ reqctx->b0_len) : 0);
+ create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
+ transhdr_len, temp, 0);
reqctx->skb = skb;
- skb_get(skb);
+ reqctx->op = op_type;
+
return skb;
dstmap_fail:
kfree_skb(skb);
err:
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
+ chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
return ERR_PTR(error);
}
@@ -2415,115 +2771,79 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
unsigned short op_type)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
- struct phys_sge_parm sg_param;
- struct scatterlist *src;
- unsigned int frags = 0, transhdr_len;
- unsigned int ivsize = AES_BLOCK_SIZE;
- unsigned int dst_size = 0, kctx_len, nents, assoclen = req->assoclen;
- unsigned char tag_offset = 0;
+ struct ulptx_sgl *ulptx;
+ unsigned int transhdr_len, dnents = 0;
+ unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
- int error = -EINVAL, src_nent;
+ int error = -EINVAL;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
- struct adapter *adap = padap(ctx->dev);
-
- reqctx->newdstsg = NULL;
- dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
- authsize);
- /* validate key size */
- if (aeadctx->enckey_len == 0)
- goto err;
-
- if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
- goto err;
- src_nent = sg_nents_for_len(req->src, assoclen + req->cryptlen);
- if (src_nent < 0)
- goto err;
-
- src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen);
- if (req->src != req->dst) {
- error = chcr_copy_assoc(req, aeadctx);
- if (error)
- return ERR_PTR(error);
- }
-
- if (dst_size && is_newsg(req->dst, &nents)) {
- reqctx->newdstsg = alloc_new_sg(req->dst, nents);
- if (IS_ERR(reqctx->newdstsg))
- return ERR_CAST(reqctx->newdstsg);
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
- reqctx->newdstsg, assoclen);
- } else {
- if (req->src == req->dst)
- reqctx->dst = src;
- else
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
- req->dst, assoclen);
- }
-
- reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
- (op_type ? -authsize : authsize));
- if (reqctx->dst_nents < 0) {
- pr_err("GCM:Invalid Destination sg entries\n");
- error = -EINVAL;
- goto err;
- }
+ struct adapter *adap = padap(a_ctx(tfm)->dev);
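+ /* rfc4106 carries the 8-byte IV at the tail of the associated data */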
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
+ assoclen = req->assoclen - 8;
- dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ reqctx->b0_dma = 0;
+ error = chcr_aead_common_init(req, op_type);
+ if (error)
+ return ERR_PTR(error);
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen +
+ (op_type ? -authsize : authsize),
+ CHCR_DST_SG_SIZE, req->assoclen);
+ dnents += MIN_GCM_SG; // For IV
+ dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
AEAD_H_SIZE;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- if (chcr_aead_need_fallback(req, src_nent + MIN_GCM_SG,
- T6_MAX_AAD_SIZE,
- transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8),
- op_type)) {
+ reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
+ SGE_MAX_WR_LEN;
+ temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV +
+ req->cryptlen), 16) * 16) : (sgl_len(reqctx->src_nents +
+ reqctx->aad_nents + MIN_GCM_SG) * 8);
+ transhdr_len += temp;
+ transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+ if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
+ transhdr_len, op_type)) {
atomic_inc(&adap->chcr_stats.fallback);
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
+ chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+ op_type);
return ERR_PTR(chcr_aead_fallback(req, op_type));
}
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb) {
error = -ENOMEM;
goto err;
}
- /* NIC driver is going to write the sge hdr. */
- skb_reserve(skb, sizeof(struct sge_opaque_hdr));
-
chcr_req = __skb_put_zero(skb, transhdr_len);
- if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
- assoclen = req->assoclen - 8;
-
- tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+ /* Offset of tag from end */
+ temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
- ctx->dev->rx_channel_id, 2, (ivsize ?
- (assoclen + 1) : 0));
+ a_ctx(tfm)->dev->rx_channel_id, 2,
+ (assoclen + 1));
chcr_req->sec_cpl.pldlen =
- htonl(assoclen + ivsize + req->cryptlen);
+ htonl(assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
assoclen ? 1 : 0, assoclen,
- assoclen + ivsize + 1, 0);
- chcr_req->sec_cpl.cipherstop_lo_authinsert =
- FILL_SEC_CPL_AUTHINSERT(0, assoclen + ivsize + 1,
- tag_offset, tag_offset);
- chcr_req->sec_cpl.seqno_numivs =
+ assoclen + IV + 1, 0);
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
+ temp, temp);
+ chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
CHCR_ENCRYPT_OP) ? 1 : 0,
CHCR_SCMD_CIPHER_MODE_AES_GCM,
CHCR_SCMD_AUTH_MODE_GHASH,
- aeadctx->hmac_ctrl, ivsize >> 1);
+ aeadctx->hmac_ctrl, IV >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
- 0, 1, dst_size);
+ 0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
@@ -2534,39 +2854,28 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
if (get_aead_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
memcpy(reqctx->iv, aeadctx->salt, 4);
- memcpy(reqctx->iv + 4, req->iv, 8);
+ memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
} else {
- memcpy(reqctx->iv, req->iv, 12);
+ memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
}
*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- sg_param.nents = reqctx->dst_nents;
- sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
- sg_param.qid = qid;
- error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
- reqctx->dst, &sg_param);
- if (error)
- goto dstmap_fail;
+ ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
- skb_set_transport_header(skb, transhdr_len);
- write_sg_to_skb(skb, &frags, req->src, assoclen);
- write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
- write_sg_to_skb(skb, &frags, src, req->cryptlen);
+ chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
+ chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
atomic_inc(&adap->chcr_stats.aead_rqst);
- create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
- sizeof(struct cpl_rx_phys_dsgl) + dst_size,
- reqctx->verify);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
+ kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+ create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
+ transhdr_len, temp, reqctx->verify);
reqctx->skb = skb;
- skb_get(skb);
+ reqctx->op = op_type;
return skb;
-dstmap_fail:
- /* ivmap_fail: */
- kfree_skb(skb);
err:
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
+ chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
return ERR_PTR(error);
}
@@ -2574,8 +2883,7 @@ err:
static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct aead_alg *alg = crypto_aead_alg(tfm);
aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
@@ -2586,25 +2894,20 @@ static int chcr_aead_cra_init(struct crypto_aead *tfm)
crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
sizeof(struct aead_request) +
crypto_aead_reqsize(aeadctx->sw_cipher)));
- aeadctx->null = crypto_get_default_null_skcipher();
- if (IS_ERR(aeadctx->null))
- return PTR_ERR(aeadctx->null);
- return chcr_device_init(ctx);
+ return chcr_device_init(a_ctx(tfm));
}
static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- crypto_put_default_null_skcipher();
crypto_free_aead(aeadctx->sw_cipher);
}
static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
aeadctx->mayverify = VERIFY_HW;
@@ -2613,7 +2916,7 @@ static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
u32 maxauth = crypto_aead_maxauthsize(tfm);
/*SHA1 authsize in ipsec is 12 instead of 10 i.e maxauthsize / 2 is not
@@ -2651,7 +2954,7 @@ static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
switch (authsize) {
case ICV_4:
@@ -2691,7 +2994,7 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
switch (authsize) {
case ICV_8:
@@ -2717,7 +3020,7 @@ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
switch (authsize) {
case ICV_4:
@@ -2760,8 +3063,7 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead,
const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_aead_ctx(aead);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
unsigned char ck_size, mk_size;
int key_ctx_size = 0;
@@ -2794,8 +3096,7 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_aead_ctx(aead);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
int error;
crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
@@ -2813,8 +3114,7 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_aead_ctx(aead);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
int error;
if (keylen < 3) {
@@ -2840,8 +3140,7 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_aead_ctx(aead);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
struct crypto_cipher *cipher;
unsigned int ck_size;
@@ -2913,12 +3212,11 @@ out:
static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_aead_ctx(authenc);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
/* it contains auth and cipher key both*/
struct crypto_authenc_keys keys;
- unsigned int bs;
+ unsigned int bs, subtype;
unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
int err = 0, i, key_ctx_len = 0;
unsigned char ck_size = 0;
@@ -2947,6 +3245,15 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
pr_err("chcr : Unsupported digest size\n");
goto out;
}
+ subtype = get_aead_subtype(authenc);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
+ goto out;
+ memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
+ - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
+ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+ }
if (keys.enckeylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keys.enckeylen == AES_KEYSIZE_192) {
@@ -2964,9 +3271,12 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
*/
memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
aeadctx->enckey_len = keys.enckeylen;
- get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
- aeadctx->enckey_len << 3);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ }
base_hash = chcr_alloc_shash(max_authsize);
if (IS_ERR(base_hash)) {
pr_err("chcr : Base driver cannot be loaded\n");
@@ -3034,12 +3344,12 @@ out:
static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
const u8 *key, unsigned int keylen)
{
- struct chcr_context *ctx = crypto_aead_ctx(authenc);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
struct crypto_authenc_keys keys;
int err;
/* it contains auth and cipher key both*/
+ unsigned int subtype;
int key_ctx_len = 0;
unsigned char ck_size = 0;
@@ -3057,6 +3367,15 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
goto out;
}
+ subtype = get_aead_subtype(authenc);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
+ goto out;
+ memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
+ - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
+ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+ }
if (keys.enckeylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keys.enckeylen == AES_KEYSIZE_192) {
@@ -3064,13 +3383,16 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
} else if (keys.enckeylen == AES_KEYSIZE_256) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
} else {
- pr_err("chcr : Unsupported cipher key\n");
+ pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
goto out;
}
memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
aeadctx->enckey_len = keys.enckeylen;
- get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
- aeadctx->enckey_len << 3);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ }
key_ctx_len = sizeof(struct _key_ctx)
+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
@@ -3082,6 +3404,40 @@ out:
aeadctx->enckey_len = 0;
return -EINVAL;
}
+
+static int chcr_aead_op(struct aead_request *req,
+ unsigned short op_type,
+ int size,
+ create_wr_t create_wr_fn)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct uld_ctx *u_ctx;
+ struct sk_buff *skb;
+
+ if (!a_ctx(tfm)->dev) {
+ pr_err("chcr : %s : No crypto device.\n", __func__);
+ return -ENXIO;
+ }
+ u_ctx = ULD_CTX(a_ctx(tfm));
+ if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ a_ctx(tfm)->tx_qidx)) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+ }
+
+ /* Form a WR from req */
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
+ op_type);
+
+ if (IS_ERR(skb) || !skb)
+ return PTR_ERR(skb);
+
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+}
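+/*
+ * Note: a successfully posted WR always completes asynchronously; the
+ * -EINPROGRESS return above is resolved later from the ULD receive path
+ * (see chcr_handle_resp()) once the hardware sends back its response CPL.
+ */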
+
static int chcr_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -3090,8 +3446,10 @@ static int chcr_aead_encrypt(struct aead_request *req)
reqctx->verify = VERIFY_HW;
switch (get_aead_subtype(tfm)) {
- case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
- case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
create_authenc_wr);
case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
@@ -3107,7 +3465,7 @@ static int chcr_aead_encrypt(struct aead_request *req)
static int chcr_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
int size;
@@ -3120,8 +3478,10 @@ static int chcr_aead_decrypt(struct aead_request *req)
}
switch (get_aead_subtype(tfm)) {
- case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
- case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
create_authenc_wr);
case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
@@ -3134,39 +3494,6 @@ static int chcr_aead_decrypt(struct aead_request *req)
}
}
-static int chcr_aead_op(struct aead_request *req,
- unsigned short op_type,
- int size,
- create_wr_t create_wr_fn)
-{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
- struct uld_ctx *u_ctx;
- struct sk_buff *skb;
-
- if (!ctx->dev) {
- pr_err("chcr : %s : No crypto device.\n", __func__);
- return -ENXIO;
- }
- u_ctx = ULD_CTX(ctx);
- if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx)) {
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -EBUSY;
- }
-
- /* Form a WR from req */
- skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], size,
- op_type);
-
- if (IS_ERR(skb) || !skb)
- return PTR_ERR(skb);
-
- skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
- chcr_send_wr(skb);
- return -EINPROGRESS;
-}
static struct chcr_alg_template driver_algs[] = {
/* AES-CBC */
{
@@ -3385,7 +3712,7 @@ static struct chcr_alg_template driver_algs[] = {
sizeof(struct chcr_aead_ctx) +
sizeof(struct chcr_gcm_ctx),
},
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = GHASH_DIGEST_SIZE,
.setkey = chcr_gcm_setkey,
.setauthsize = chcr_gcm_setauthsize,
@@ -3405,7 +3732,7 @@ static struct chcr_alg_template driver_algs[] = {
sizeof(struct chcr_gcm_ctx),
},
- .ivsize = 8,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = GHASH_DIGEST_SIZE,
.setkey = chcr_gcm_setkey,
.setauthsize = chcr_4106_4309_setauthsize,
@@ -3450,7 +3777,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3471,7 +3798,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3493,7 +3820,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3513,7 +3840,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3534,7 +3861,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3555,7 +3882,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3575,6 +3902,133 @@ static struct chcr_alg_template driver_algs[] = {
.setauthsize = chcr_authenc_null_setauthsize,
}
},
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+
+ .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-digest_null-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = 0,
+ .setkey = chcr_aead_digest_null_setkey,
+ .setauthsize = chcr_authenc_null_setauthsize,
+ }
+ },
+
};
/*
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index 583008de51a3..d1673a5d4bf5 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -176,21 +176,21 @@
KEY_CONTEXT_SALT_PRESENT_V(1) | \
KEY_CONTEXT_CTX_LEN_V((ctx_len)))
-#define FILL_WR_OP_CCTX_SIZE(len, ctx_len) \
+#define FILL_WR_OP_CCTX_SIZE \
htonl( \
FW_CRYPTO_LOOKASIDE_WR_OPCODE_V( \
FW_CRYPTO_LOOKASIDE_WR) | \
FW_CRYPTO_LOOKASIDE_WR_COMPL_V(0) | \
- FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((len)) | \
- FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(1) | \
- FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V((ctx_len)))
+ FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((0)) | \
+ FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(0) | \
+ FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V(0))
-#define FILL_WR_RX_Q_ID(cid, qid, wr_iv, lcb, fid) \
+#define FILL_WR_RX_Q_ID(cid, qid, lcb, fid) \
htonl( \
FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V((cid)) | \
FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \
FW_CRYPTO_LOOKASIDE_WR_LCB_V((lcb)) | \
- FW_CRYPTO_LOOKASIDE_WR_IV_V((wr_iv)) | \
+ FW_CRYPTO_LOOKASIDE_WR_IV_V((IV_NOP)) | \
FW_CRYPTO_LOOKASIDE_WR_FQIDX_V(fid))
#define FILL_ULPTX_CMD_DEST(cid, qid) \
@@ -214,31 +214,17 @@
calc_tx_flits_ofld(skb) * 8), 16)))
#define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\
- ULP_TX_SC_MORE_V((immdatalen) ? 0 : 1))
-
+ ULP_TX_SC_MORE_V((immdatalen)))
#define MAX_NK 8
-#define CRYPTO_MAX_IMM_TX_PKT_LEN 256
-#define MAX_WR_SIZE 512
#define ROUND_16(bytes) ((bytes) & 0xFFFFFFF0)
#define MAX_DSGL_ENT 32
-#define MAX_DIGEST_SKB_SGE (MAX_SKB_FRAGS - 2)
#define MIN_CIPHER_SG 1 /* IV */
-#define MIN_AUTH_SG 2 /*IV + AAD*/
-#define MIN_GCM_SG 2 /* IV + AAD*/
+#define MIN_AUTH_SG 1 /* IV */
+#define MIN_GCM_SG 1 /* IV */
#define MIN_DIGEST_SG 1 /*Partial Buffer*/
-#define MIN_CCM_SG 3 /*IV+AAD+B0*/
+#define MIN_CCM_SG 2 /*IV+B0*/
#define SPACE_LEFT(len) \
- ((MAX_WR_SIZE - WR_MIN_LEN - (len)))
-
-unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40,
- 48, 64, 72, 88,
- 96, 112, 120, 136,
- 144, 160, 168, 184,
- 192};
-unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80,
- 112, 112, 128, 128, 144, 144, 160, 160,
- 192, 192, 208, 208, 224, 224, 240, 240,
- 272, 272, 288, 288, 304, 304, 320, 320};
+ ((SGE_MAX_WR_LEN - WR_MIN_LEN - (len)))
struct algo_param {
unsigned int auth_mode;
@@ -258,10 +244,8 @@ struct hash_wr_param {
struct cipher_wr_param {
struct ablkcipher_request *req;
- struct scatterlist *srcsg;
char *iv;
int bytes;
- short int snent;
unsigned short qid;
};
enum {
@@ -299,31 +283,11 @@ enum {
ICV_16 = 16
};
-struct hash_op_params {
- unsigned char mk_size;
- unsigned char pad_align;
- unsigned char auth_mode;
- char hash_name[MAX_HASH_NAME];
- unsigned short block_size;
- unsigned short word_size;
- unsigned short ipad_size;
-};
-
struct phys_sge_pairs {
__be16 len[8];
__be64 addr[8];
};
-struct phys_sge_parm {
- unsigned int nents;
- unsigned int obsize;
- unsigned short qid;
-};
-
-struct crypto_result {
- struct completion completion;
- int err;
-};
static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
@@ -431,10 +395,4 @@ static inline u32 aes_ks_subword(const u32 w)
return *(u32 *)(&bytes[0]);
}
-static u32 round_constant[11] = {
- 0x01000000, 0x02000000, 0x04000000, 0x08000000,
- 0x10000000, 0x20000000, 0x40000000, 0x80000000,
- 0x1B000000, 0x36000000, 0x6C000000
-};
-
#endif /* __CHCR_ALGO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index b6dd9cbe815f..04f277cade7c 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -48,6 +48,9 @@ static struct cxgb4_uld_info chcr_uld_info = {
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
.rx_handler = chcr_uld_rx_handler,
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+ .tx_handler = chcr_uld_tx_handler,
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
};
struct uld_ctx *assign_chcr_device(void)
@@ -154,16 +157,20 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
struct uld_ctx *u_ctx;
/* Create the device and add it in the device list */
+ if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /* Create the device and add it in the device list */
u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
if (!u_ctx) {
u_ctx = ERR_PTR(-ENOMEM);
goto out;
}
- if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) {
- u_ctx = ERR_PTR(-ENOMEM);
- goto out;
- }
u_ctx->lldi = *lld;
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+ if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
+ chcr_add_xfrmops(lld);
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
out:
return u_ctx;
}
@@ -187,6 +194,13 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
return 0;
}
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
+{
+ return chcr_ipsec_xmit(skb, dev);
+}
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
struct uld_ctx *u_ctx = handle;
@@ -224,7 +238,7 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
static int __init chcr_crypto_init(void)
{
if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
- pr_err("ULD register fail: No chcr crypto support in cxgb4");
+ pr_err("ULD register fail: No chcr crypto support in cxgb4\n");
return 0;
}
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index c9a19b2a1e9f..3c29ee09b8b5 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -39,6 +39,7 @@
#include <crypto/algapi.h>
#include "t4_hw.h"
#include "cxgb4.h"
+#include "t4_msg.h"
#include "cxgb4_uld.h"
#define DRV_MODULE_NAME "chcr"
@@ -89,12 +90,49 @@ struct uld_ctx {
struct chcr_dev *dev;
};
-struct uld_ctx * assign_chcr_device(void);
+struct chcr_ipsec_req {
+ struct ulp_txpkt ulptx;
+ struct ulptx_idata sc_imm;
+ struct cpl_tx_sec_pdu sec_cpl;
+ struct _key_ctx key_ctx;
+};
+
+struct chcr_ipsec_wr {
+ struct fw_ulptx_wr wreq;
+ struct chcr_ipsec_req req;
+};
+
+struct ipsec_sa_entry {
+ int hmac_ctrl;
+ unsigned int enckey_len;
+ unsigned int kctx_len;
+ unsigned int authsize;
+ __be32 key_ctx_hdr;
+ char salt[MAX_SALT];
+ char key[2 * AES_MAX_KEY_SIZE];
+};
+
+/*
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
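+/*
+ * Rough worked example of the flit math above, assuming the usual
+ * ulptx_sgl/ulptx_sge_pair layout: sgl_len(1) = 2 flits (cmd_nsge/len0
+ * plus addr0 only), sgl_len(3) = 5 (the 2-flit header plus one 3-flit
+ * ulptx_sge_pair) and sgl_len(4) = 7, since a lone trailing entry still
+ * needs its own len/addr slot.
+ */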
+
+struct uld_ctx *assign_chcr_device(void);
int chcr_send_wr(struct sk_buff *skb);
int start_crypto(void);
int stop_crypto(void);
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
const struct pkt_gl *pgl);
+int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev);
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
int err);
+int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
+void chcr_add_xfrmops(const struct cxgb4_lld_info *lld);
#endif /* __CHCR_CORE_H__ */
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 30af1ee17b87..7daf0a17a7d2 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -134,14 +134,16 @@
#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 0x02000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_GCM 0x03000000
-#define CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC 0x04000000
+#define CRYPTO_ALG_SUB_TYPE_CBC_SHA 0x04000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_CCM 0x05000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000
-#define CRYPTO_ALG_SUB_TYPE_AEAD_NULL 0x07000000
+#define CRYPTO_ALG_SUB_TYPE_CBC_NULL 0x07000000
#define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000
#define CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 0x09000000
#define CRYPTO_ALG_SUB_TYPE_XTS 0x0a000000
#define CRYPTO_ALG_SUB_TYPE_CBC 0x0b000000
+#define CRYPTO_ALG_SUB_TYPE_CTR_SHA 0x0c000000
+#define CRYPTO_ALG_SUB_TYPE_CTR_NULL 0x0d000000
#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
@@ -149,9 +151,23 @@
#define CHCR_HASH_MAX_BLOCK_SIZE_64 64
#define CHCR_HASH_MAX_BLOCK_SIZE_128 128
-#define CHCR_SG_SIZE 2048
+#define CHCR_SRC_SG_SIZE (0x10000 - sizeof(int))
+#define CHCR_DST_SG_SIZE 2048
-/* Aligned to 128 bit boundary */
+static inline struct chcr_context *a_ctx(struct crypto_aead *tfm)
+{
+ return crypto_aead_ctx(tfm);
+}
+
+static inline struct chcr_context *c_ctx(struct crypto_ablkcipher *tfm)
+{
+ return crypto_ablkcipher_ctx(tfm);
+}
+
+static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
+{
+ return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+}
struct ablk_ctx {
struct crypto_skcipher *sw_cipher;
@@ -165,16 +181,37 @@ struct ablk_ctx {
};
struct chcr_aead_reqctx {
struct sk_buff *skb;
- struct scatterlist *dst;
- struct scatterlist *newdstsg;
- struct scatterlist srcffwd[2];
- struct scatterlist dstffwd[2];
+ dma_addr_t iv_dma;
+ dma_addr_t b0_dma;
+ unsigned int b0_len;
+ unsigned int op;
+ short int aad_nents;
+ short int src_nents;
short int dst_nents;
+ u16 imm;
u16 verify;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE];
};
+struct ulptx_walk {
+ struct ulptx_sgl *sgl;
+ unsigned int nents;
+ unsigned int pair_idx;
+ unsigned int last_sg_len;
+ struct scatterlist *last_sg;
+ struct ulptx_sge_pair *pair;
+
+};
+
+struct dsgl_walk {
+ unsigned int nents;
+ unsigned int last_sg_len;
+ struct scatterlist *last_sg;
+ struct cpl_rx_phys_dsgl *dsgl;
+ struct phys_sge_pairs *to;
+};
+
struct chcr_gcm_ctx {
u8 ghash_h[AEAD_H_SIZE];
};
@@ -190,22 +227,18 @@ struct __aead_ctx {
struct chcr_authenc_ctx authenc[0];
};
-
-
struct chcr_aead_ctx {
__be32 key_ctx_hdr;
unsigned int enckey_len;
- struct crypto_skcipher *null;
struct crypto_aead *sw_cipher;
u8 salt[MAX_SALT];
u8 key[CHCR_AES_MAX_KEY_LEN];
+ u8 nonce[4];
u16 hmac_ctrl;
u16 mayverify;
struct __aead_ctx ctx[0];
};
-
-
struct hmac_ctx {
struct crypto_shash *base_hash;
u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
@@ -231,8 +264,11 @@ struct chcr_ahash_req_ctx {
u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
u8 *reqbfr;
u8 *skbfr;
+ dma_addr_t dma_addr;
+ u32 dma_len;
u8 reqlen;
- /* DMA the partial hash in it */
+ u8 imm;
+ u8 is_sg_map;
u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
u64 data_len; /* Data len till time */
/* SKB which is being sent to the hardware for processing */
@@ -241,14 +277,15 @@ struct chcr_ahash_req_ctx {
struct chcr_blkcipher_req_ctx {
struct sk_buff *skb;
- struct scatterlist srcffwd[2];
- struct scatterlist dstffwd[2];
struct scatterlist *dstsg;
- struct scatterlist *dst;
- struct scatterlist *newdstsg;
unsigned int processed;
+ unsigned int last_req_len;
+ struct scatterlist *srcsg;
+ unsigned int src_ofst;
+ unsigned int dst_ofst;
unsigned int op;
- short int dst_nents;
+ dma_addr_t iv_dma;
+ u16 imm;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
};
@@ -262,38 +299,34 @@ struct chcr_alg_template {
} alg;
};
-struct chcr_req_ctx {
- union {
- struct ahash_request *ahash_req;
- struct aead_request *aead_req;
- struct ablkcipher_request *ablk_req;
- } req;
- union {
- struct chcr_ahash_req_ctx *ahash_ctx;
- struct chcr_aead_reqctx *reqctx;
- struct chcr_blkcipher_req_ctx *ablk_ctx;
- } ctx;
-};
-
-struct sge_opaque_hdr {
- void *dev;
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
-};
-
typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
unsigned short qid,
int size,
unsigned short op_type);
-static int chcr_aead_op(struct aead_request *req_base,
- unsigned short op_type,
- int size,
- create_wr_t create_wr_fn);
-static inline int get_aead_subtype(struct crypto_aead *aead);
-static int is_newsg(struct scatterlist *sgl, unsigned int *newents);
-static struct scatterlist *alloc_new_sg(struct scatterlist *sgl,
- unsigned int nents);
-static inline void free_new_sg(struct scatterlist *sgl);
-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
- unsigned char *input, int err);
+void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
+int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
+ unsigned short op_type);
+void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req,
+ unsigned short op_type);
+void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ unsigned int assoclen, unsigned short op_type,
+ unsigned short qid);
+void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx,
+ unsigned int assoclen, unsigned short op_type);
+void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+ struct ulptx_sgl *ulptx,
+ struct cipher_wr_param *wrparam);
+int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req);
+void chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req);
+void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct cipher_wr_param *wrparam,
+ unsigned short qid);
+int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip);
+void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx,
+ struct hash_wr_param *param);
+int chcr_hash_dma_map(struct device *dev, struct ahash_request *req);
+void chcr_hash_dma_unmap(struct device *dev, struct ahash_request *req);
#endif /* __CHCR_CRYPTO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
new file mode 100644
index 000000000000..db1e241104ed
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -0,0 +1,654 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Written and Maintained by:
+ * Atul Gupta (atul.gupta@chelsio.com)
+ */
+
+#define pr_fmt(fmt) "chcr:" fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/highmem.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <net/esp.h>
+#include <net/xfrm.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+
+#include "chcr_core.h"
+#include "chcr_algo.h"
+#include "chcr_crypto.h"
+
+/*
+ * Max Tx descriptor space we allow for an Ethernet packet to be inlined
+ * into a WR.
+ */
+#define MAX_IMM_TX_PKT_LEN 256
+#define GCM_ESP_IV_SIZE 8
+
+static int chcr_xfrm_add_state(struct xfrm_state *x);
+static void chcr_xfrm_del_state(struct xfrm_state *x);
+static void chcr_xfrm_free_state(struct xfrm_state *x);
+static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+
+static const struct xfrmdev_ops chcr_xfrmdev_ops = {
+ .xdo_dev_state_add = chcr_xfrm_add_state,
+ .xdo_dev_state_delete = chcr_xfrm_del_state,
+ .xdo_dev_state_free = chcr_xfrm_free_state,
+ .xdo_dev_offload_ok = chcr_ipsec_offload_ok,
+};
+
+/* Add offload xfrms to Chelsio Interface */
+void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
+{
+ struct net_device *netdev = NULL;
+ int i;
+
+ for (i = 0; i < lld->nports; i++) {
+ netdev = lld->ports[i];
+ if (!netdev)
+ continue;
+ netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+ netdev->features |= NETIF_F_HW_ESP;
+ rtnl_lock();
+ netdev_change_features(netdev);
+ rtnl_unlock();
+ }
+}
+
+static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
+ struct ipsec_sa_entry *sa_entry)
+{
+ int hmac_ctrl;
+ int authsize = x->aead->alg_icv_len / 8;
+
+ sa_entry->authsize = authsize;
+
+ switch (authsize) {
+ case ICV_8:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ break;
+ case ICV_12:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ break;
+ case ICV_16:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return hmac_ctrl;
+}
+
+static inline int chcr_ipsec_setkey(struct xfrm_state *x,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct crypto_cipher *cipher;
+ int keylen = (x->aead->alg_key_len + 7) / 8;
+ unsigned char *key = x->aead->alg_key;
+ int ck_size, key_ctx_size = 0;
+ unsigned char ghash_h[AEAD_H_SIZE];
+ int ret = 0;
+
+ if (keylen > 3) {
+ keylen -= 4; /* nonce/salt is present in the last 4 bytes */
+ memcpy(sa_entry->salt, key + keylen, 4);
+ }
+
+ if (keylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("GCM: Invalid key length %d\n", keylen);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(sa_entry->key, key, keylen);
+ sa_entry->enckey_len = keylen;
+ key_ctx_size = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keylen, 16)) << 4) +
+ AEAD_H_SIZE;
+
+ sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ 0, 0,
+ key_ctx_size >> 4);
+
+ /* Calculate the H = CIPH(K, 0 repeated 16 times).
+ * It will go in key context
+ */
+ cipher = crypto_alloc_cipher("aes-generic", 0, 0);
+ if (IS_ERR(cipher)) {
+ sa_entry->enckey_len = 0;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = crypto_cipher_setkey(cipher, key, keylen);
+ if (ret) {
+ sa_entry->enckey_len = 0;
+ goto out1;
+ }
+ memset(ghash_h, 0, AEAD_H_SIZE);
+ crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
+ memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
+ 16), ghash_h, AEAD_H_SIZE);
+ sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
+ AEAD_H_SIZE;
+out1:
+ crypto_free_cipher(cipher);
+out:
+ return ret;
+}
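+/*
+ * Rough sizing example, assuming AEAD_H_SIZE is the 16-byte GHASH block:
+ * for rfc4106(gcm(aes)) with a 128-bit key the xfrm key blob is 20 bytes,
+ * the last 4 of which become the salt, so enckey_len = 16, H = AES_K(0^128)
+ * is stored right after the 16-byte aligned key, and kctx_len works out to
+ * 16 + AEAD_H_SIZE = 32 bytes.
+ */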
+
+/*
+ * chcr_xfrm_add_state
+ * returns 0 on success, or a negative errno if the xfrm state cannot be
+ * offloaded (unsupported algorithm, mode or flags) or the SA entry cannot
+ * be allocated
+ */
+static int chcr_xfrm_add_state(struct xfrm_state *x)
+{
+ struct ipsec_sa_entry *sa_entry;
+ int res = 0;
+
+ if (x->props.aalgo != SADB_AALG_NONE) {
+ pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.calgo != SADB_X_CALG_NONE) {
+ pr_debug("CHCR: Cannot offload compressed xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.flags & XFRM_STATE_ESN) {
+ pr_debug("CHCR: Cannot offload ESN xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.family != AF_INET &&
+ x->props.family != AF_INET6) {
+ pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
+ return -EINVAL;
+ }
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL) {
+ pr_debug("CHCR: Only transport and tunnel xfrm offload\n");
+ return -EINVAL;
+ }
+ if (x->id.proto != IPPROTO_ESP) {
+ pr_debug("CHCR: Only ESP xfrm state offloaded\n");
+ return -EINVAL;
+ }
+ if (x->encap) {
+ pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
+ return -EINVAL;
+ }
+ if (!x->aead) {
+ pr_debug("CHCR: Cannot offload xfrm states without aead\n");
+ return -EINVAL;
+ }
+ if (x->aead->alg_icv_len != 128 &&
+ x->aead->alg_icv_len != 96) {
+ pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
+ return -EINVAL;
+ }
+ if ((x->aead->alg_key_len != 128 + 32) &&
+ (x->aead->alg_key_len != 256 + 32)) {
+ pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ return -EINVAL;
+ }
+ if (x->tfcpad) {
+ pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
+ return -EINVAL;
+ }
+ if (!x->geniv) {
+ pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
+ return -EINVAL;
+ }
+ if (strcmp(x->geniv, "seqiv")) {
+ pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
+ return -EINVAL;
+ }
+
+ sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
+ if (!sa_entry) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
+ chcr_ipsec_setkey(x, sa_entry);
+ x->xso.offload_handle = (unsigned long)sa_entry;
+ try_module_get(THIS_MODULE);
+out:
+ return res;
+}
+
+static void chcr_xfrm_del_state(struct xfrm_state *x)
+{
+	/* Nothing to undo here; the SA entry is freed in chcr_xfrm_free_state(). */
+ if (!x->xso.offload_handle)
+ return;
+}
+
+static void chcr_xfrm_free_state(struct xfrm_state *x)
+{
+ struct ipsec_sa_entry *sa_entry;
+
+ if (!x->xso.offload_handle)
+ return;
+
+ sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
+ kfree(sa_entry);
+ module_put(THIS_MODULE);
+}
+
+static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ /* Offload with IP options is not supported yet */
+ if (ip_hdr(skb)->ihl > 5)
+ return false;
+
+ return true;
+}
+
+static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
+{
+ int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len;
+
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+ return hdrlen;
+ return 0;
+}
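+/*
+ * In other words, the packet is only inlined when its payload plus the
+ * crypto WR header (chcr_ipsec_req + key context + cpl_tx_pkt) still fits
+ * in MAX_IMM_TX_PKT_LEN; the hdrlen returned here is added to skb->len
+ * when the immediate WR is sized in calc_tx_sec_flits().
+ */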
+
+static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
+ unsigned int kctx_len)
+{
+ unsigned int flits;
+ int hdrlen = is_eth_imm(skb, kctx_len);
+
+ /* If the skb is small enough, we can pump it out as a work request
+ * with only immediate data. In that case we just have to have the
+ * TX Packet header plus the skb data in the Work Request.
+ */
+
+ if (hdrlen)
+ return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
+
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+
+ /* Otherwise, we're going to have to construct a Scatter gather list
+ * of the skb body and fragments. We also include the flits necessary
+ * for the TX Packet Work Request and CPL. We always have a firmware
+ * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+ * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+ * message or, if we're doing a Large Send Offload, an LSO CPL message
+ * with an embedded TX Packet Write CPL message.
+ */
+ flits += (sizeof(struct fw_ulptx_wr) +
+ sizeof(struct chcr_ipsec_req) +
+ kctx_len +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ return flits;
+}
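+/*
+ * So an skb that qualifies as immediate costs roughly
+ * (hdrlen + skb->len) / 8 flits, while anything larger pays the fixed
+ * WR/CPL/key-context overhead plus sgl_len(nr_frags + 1) flits for its
+ * gather list.
+ */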
+
+inline void *copy_cpltx_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos)
+{
+ struct adapter *adap;
+ struct port_info *pi;
+ struct sge_eth_txq *q;
+ struct cpl_tx_pkt_core *cpl;
+ u64 cntrl = 0;
+ u32 ctrl0, qidx;
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ cpl = (struct cpl_tx_pkt_core *)pos;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+ ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
+ TXPKT_PF_V(adap->pf);
+ if (skb_vlan_tag_present(skb)) {
+ q->vlan_ins++;
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
+ }
+
+ cpl->ctrl0 = htonl(ctrl0);
+ cpl->pack = htons(0);
+ cpl->len = htons(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ pos += sizeof(struct cpl_tx_pkt_core);
+ return pos;
+}
+
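+/*
+ * copy_key_cpltx_pktxt() has to cope with the key context straddling the
+ * end of the TX descriptor ring: if the space left before q->q.stat cannot
+ * hold the key (and the trailing CPL), the key bytes are split and the
+ * copy continues from the start of the ring at q->q.desc.
+ */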
+inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct adapter *adap;
+ struct port_info *pi;
+ struct sge_eth_txq *q;
+ unsigned int len, qidx;
+ struct _key_ctx *key_ctx;
+ int left, eoq, key_len;
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ len = sa_entry->enckey_len + sizeof(struct cpl_tx_pkt_core);
+ key_len = sa_entry->kctx_len;
+
+ /* end of queue, reset pos to start of queue */
+ eoq = (void *)q->q.stat - pos;
+ left = eoq;
+ if (!eoq) {
+ pos = q->q.desc;
+ left = 64 * q->q.size;
+ }
+
+ /* Copy the Key context header */
+ key_ctx = (struct _key_ctx *)pos;
+ key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
+ memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
+ pos += sizeof(struct _key_ctx);
+ left -= sizeof(struct _key_ctx);
+
+ if (likely(len <= left)) {
+ memcpy(key_ctx->key, sa_entry->key, key_len);
+ pos += key_len;
+ } else {
+ if (key_len <= left) {
+ memcpy(pos, sa_entry->key, key_len);
+ pos += key_len;
+ } else {
+ memcpy(pos, sa_entry->key, left);
+ memcpy(q->q.desc, sa_entry->key + left,
+ key_len - left);
+ pos = (u8 *)q->q.desc + (key_len - left);
+ }
+ }
+ /* Copy CPL TX PKT XT */
+ pos = copy_cpltx_pktxt(skb, dev, pos);
+
+ return pos;
+}
+
+inline void *chcr_crypto_wreq(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ int credits,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ unsigned int immdatalen = 0;
+ unsigned int ivsize = GCM_ESP_IV_SIZE;
+ struct chcr_ipsec_wr *wr;
+ unsigned int flits;
+ u32 wr_mid;
+ int qidx = skb_get_queue_mapping(skb);
+ struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ unsigned int kctx_len = sa_entry->kctx_len;
+ int qid = q->q.cntxt_id;
+
+ atomic_inc(&adap->chcr_stats.ipsec_cnt);
+
+ flits = calc_tx_sec_flits(skb, kctx_len);
+
+ if (is_eth_imm(skb, kctx_len))
+ immdatalen = skb->len;
+
+ /* WR Header */
+ wr = (struct chcr_ipsec_wr *)pos;
+ wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+ wr_mid |= FW_ULPTX_WR_DATA_F;
+ wr->wreq.flowid_len16 = htonl(wr_mid);
+
+ /* ULPTX */
+ wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
+ wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1);
+
+ /* Sub-command */
+ wr->req.sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+ wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+ sizeof(wr->req.key_ctx) +
+ kctx_len +
+ sizeof(struct cpl_tx_pkt_core) +
+ immdatalen);
+
+ /* CPL_SEC_PDU */
+ wr->req.sec_cpl.op_ivinsrtofst = htonl(
+ CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
+ CPL_TX_SEC_PDU_CPLLEN_V(2) |
+ CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
+ CPL_TX_SEC_PDU_IVINSRTOFST_V(
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) + 1)));
+
+ wr->req.sec_cpl.pldlen = htonl(skb->len);
+
+ wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ (skb_transport_offset(skb) + 1),
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr)),
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) +
+ GCM_ESP_IV_SIZE + 1), 0);
+
+ wr->req.sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) +
+ GCM_ESP_IV_SIZE + 1,
+ sa_entry->authsize,
+ sa_entry->authsize);
+ wr->req.sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
+ CHCR_SCMD_CIPHER_MODE_AES_GCM,
+ CHCR_SCMD_AUTH_MODE_GHASH,
+ sa_entry->hmac_ctrl,
+ ivsize >> 1);
+ wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 0, 0);
+
+ pos += sizeof(struct fw_ulptx_wr) +
+ sizeof(struct ulp_txpkt) +
+ sizeof(struct ulptx_idata) +
+ sizeof(struct cpl_tx_sec_pdu);
+
+ pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);
+
+ return pos;
+}
+
+/**
+ * flits_to_desc - returns the num of Tx descriptors for the given flits
+ * @n: the number of flits
+ *
+ * Returns the number of Tx descriptors needed for the supplied number
+ * of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+ WARN_ON(n > SGE_MAX_WR_LEN / 8);
+ return DIV_ROUND_UP(n, 8);
+}
+
+static inline unsigned int txq_avail(const struct sge_txq *q)
+{
+ return q->size - 1 - q->in_use;
+}
+
+static void eth_txq_stop(struct sge_eth_txq *q)
+{
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+}
+
+static inline void txq_advance(struct sge_txq *q, unsigned int n)
+{
+ q->in_use += n;
+ q->pidx += n;
+ if (q->pidx >= q->size)
+ q->pidx -= q->size;
+}
+
+/*
+ * chcr_ipsec_xmit called from ULD Tx handler
+ */
+int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct xfrm_state *x = xfrm_input_state(skb);
+ struct ipsec_sa_entry *sa_entry;
+ u64 *pos, *end, *before, *sgl;
+ int qidx, left, credits;
+ unsigned int flits = 0, ndesc, kctx_len;
+ struct adapter *adap;
+ struct sge_eth_txq *q;
+ struct port_info *pi;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ bool immediate = false;
+
+ if (!x->xso.offload_handle)
+ return NETDEV_TX_BUSY;
+
+ sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
+ kctx_len = sa_entry->kctx_len;
+
+ if (skb->sp->len != 1) {
+out_free: dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
+
+ flits = calc_tx_sec_flits(skb, sa_entry->kctx_len);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&q->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ eth_txq_stop(q);
+ dev_err(adap->pdev_dev,
+ "%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
+ dev->name, qidx, credits, ndesc, txq_avail(&q->q),
+ flits);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (is_eth_imm(skb, kctx_len))
+ immediate = true;
+
+ if (!immediate &&
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ q->mapping_err++;
+ goto out_free;
+ }
+
+ pos = (u64 *)&q->q.desc[q->q.pidx];
+ before = (u64 *)pos;
+ end = (u64 *)pos + flits;
+ /* Setup IPSec CPL */
+ pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
+ credits, sa_entry);
+ if (before > (u64 *)pos) {
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ }
+ if (pos == (u64 *)q->q.stat) {
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ pos = (void *)q->q.desc;
+ }
+
+ sgl = (void *)pos;
+ if (immediate) {
+ cxgb4_inline_tx_skb(skb, &q->q, sgl);
+ dev_consume_skb_any(skb);
+ } else {
+ int last_desc;
+
+ cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
+ 0, addr);
+ skb_orphan(skb);
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ q->q.sdesc[last_desc].skb = skb;
+ q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
+ }
+ txq_advance(&q->q, ndesc);
+
+ cxgb4_ring_tx_db(adap, &q->q, ndesc);
+ return NETDEV_TX_OK;
+}
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
index 451620b475a0..86f5f459762e 100644
--- a/drivers/crypto/exynos-rng.c
+++ b/drivers/crypto/exynos-rng.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* exynos-rng.c - Random Number Generator driver for the Exynos
*
@@ -6,15 +7,6 @@
* Loosely based on old driver from drivers/char/hw_random/exynos-rng.c:
* Copyright (C) 2012 Samsung Electronics
* Jonghwa Lee <jonghwa3.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation;
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -22,12 +14,18 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <crypto/internal/rng.h>
#define EXYNOS_RNG_CONTROL 0x0
#define EXYNOS_RNG_STATUS 0x10
+
+#define EXYNOS_RNG_SEED_CONF 0x14
+#define EXYNOS_RNG_GEN_PRNG BIT(1)
+
#define EXYNOS_RNG_SEED_BASE 0x140
#define EXYNOS_RNG_SEED(n) (EXYNOS_RNG_SEED_BASE + (n * 0x4))
#define EXYNOS_RNG_OUT_BASE 0x160
@@ -43,13 +41,21 @@
#define EXYNOS_RNG_SEED_REGS 5
#define EXYNOS_RNG_SEED_SIZE (EXYNOS_RNG_SEED_REGS * 4)
+enum exynos_prng_type {
+ EXYNOS_PRNG_UNKNOWN = 0,
+ EXYNOS_PRNG_EXYNOS4,
+ EXYNOS_PRNG_EXYNOS5,
+};
+
/*
- * Driver re-seeds itself with generated random numbers to increase
- * the randomness.
+ * Driver re-seeds itself with generated random numbers to hinder
+ * backtracking of the original seed.
*
* Time for next re-seed in ms.
*/
-#define EXYNOS_RNG_RESEED_TIME 100
+#define EXYNOS_RNG_RESEED_TIME 1000
+#define EXYNOS_RNG_RESEED_BYTES 65536
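+/*
+ * Re-seeding is triggered by whichever limit is reached first: the
+ * timeout above or this byte count of generated output (see
+ * exynos_rng_reseed()).
+ */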
+
/*
* In polling mode, do not wait infinitely for the engine to finish the work.
*/
@@ -63,13 +69,17 @@ struct exynos_rng_ctx {
/* Device associated memory */
struct exynos_rng_dev {
struct device *dev;
+ enum exynos_prng_type type;
void __iomem *mem;
struct clk *clk;
+ struct mutex lock;
/* Generated numbers stored for seeding during resume */
u8 seed_save[EXYNOS_RNG_SEED_SIZE];
unsigned int seed_save_len;
/* Time of last seeding in jiffies */
unsigned long last_seeding;
+ /* Bytes generated since last seeding */
+ unsigned long bytes_seeding;
};
static struct exynos_rng_dev *exynos_rng_dev;
@@ -114,39 +124,12 @@ static int exynos_rng_set_seed(struct exynos_rng_dev *rng,
}
rng->last_seeding = jiffies;
+ rng->bytes_seeding = 0;
return 0;
}
/*
- * Read from output registers and put the data under 'dst' array,
- * up to dlen bytes.
- *
- * Returns number of bytes actually stored in 'dst' (dlen
- * or EXYNOS_RNG_SEED_SIZE).
- */
-static unsigned int exynos_rng_copy_random(struct exynos_rng_dev *rng,
- u8 *dst, unsigned int dlen)
-{
- unsigned int cnt = 0;
- int i, j;
- u32 val;
-
- for (j = 0; j < EXYNOS_RNG_SEED_REGS; j++) {
- val = exynos_rng_readl(rng, EXYNOS_RNG_OUT(j));
-
- for (i = 0; i < 4; i++) {
- dst[cnt] = val & 0xff;
- val >>= 8;
- if (++cnt >= dlen)
- return cnt;
- }
- }
-
- return cnt;
-}
-
-/*
* Start the engine and poll for finish. Then read from output registers
* filling the 'dst' buffer up to 'dlen' bytes or up to size of generated
* random data (EXYNOS_RNG_SEED_SIZE).
@@ -160,8 +143,13 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng,
{
int retry = EXYNOS_RNG_WAIT_RETRIES;
- exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START,
- EXYNOS_RNG_CONTROL);
+ if (rng->type == EXYNOS_PRNG_EXYNOS4) {
+ exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START,
+ EXYNOS_RNG_CONTROL);
+ } else if (rng->type == EXYNOS_PRNG_EXYNOS5) {
+ exynos_rng_writel(rng, EXYNOS_RNG_GEN_PRNG,
+ EXYNOS_RNG_SEED_CONF);
+ }
while (!(exynos_rng_readl(rng,
EXYNOS_RNG_STATUS) & EXYNOS_RNG_STATUS_RNG_DONE) && --retry)
@@ -173,7 +161,9 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng,
/* Clear status bit */
exynos_rng_writel(rng, EXYNOS_RNG_STATUS_RNG_DONE,
EXYNOS_RNG_STATUS);
- *read = exynos_rng_copy_random(rng, dst, dlen);
+ *read = min_t(size_t, dlen, EXYNOS_RNG_SEED_SIZE);
+ memcpy_fromio(dst, rng->mem + EXYNOS_RNG_OUT_BASE, *read);
+ rng->bytes_seeding += *read;
return 0;
}
@@ -187,13 +177,18 @@ static void exynos_rng_reseed(struct exynos_rng_dev *rng)
unsigned int read = 0;
u8 seed[EXYNOS_RNG_SEED_SIZE];
- if (time_before(now, next_seeding))
+ if (time_before(now, next_seeding) &&
+ rng->bytes_seeding < EXYNOS_RNG_RESEED_BYTES)
return;
if (exynos_rng_get_random(rng, seed, sizeof(seed), &read))
return;
exynos_rng_set_seed(rng, seed, read);
+
+	/* Drop the lock briefly so other waiters get a chance to run. */
+ mutex_unlock(&rng->lock);
+ mutex_lock(&rng->lock);
}
static int exynos_rng_generate(struct crypto_rng *tfm,
@@ -209,6 +204,7 @@ static int exynos_rng_generate(struct crypto_rng *tfm,
if (ret)
return ret;
+ mutex_lock(&rng->lock);
do {
ret = exynos_rng_get_random(rng, dst, dlen, &read);
if (ret)
@@ -219,6 +215,7 @@ static int exynos_rng_generate(struct crypto_rng *tfm,
exynos_rng_reseed(rng);
} while (dlen > 0);
+ mutex_unlock(&rng->lock);
clk_disable_unprepare(rng->clk);
@@ -236,7 +233,9 @@ static int exynos_rng_seed(struct crypto_rng *tfm, const u8 *seed,
if (ret)
return ret;
+ mutex_lock(&rng->lock);
ret = exynos_rng_set_seed(ctx->rng, seed, slen);
+ mutex_unlock(&rng->lock);
clk_disable_unprepare(rng->clk);
@@ -259,7 +258,7 @@ static struct rng_alg exynos_rng_alg = {
.base = {
.cra_name = "stdrng",
.cra_driver_name = "exynos_rng",
- .cra_priority = 100,
+ .cra_priority = 300,
.cra_ctxsize = sizeof(struct exynos_rng_ctx),
.cra_module = THIS_MODULE,
.cra_init = exynos_rng_kcapi_init,
@@ -279,6 +278,10 @@ static int exynos_rng_probe(struct platform_device *pdev)
if (!rng)
return -ENOMEM;
+ rng->type = (enum exynos_prng_type)of_device_get_match_data(&pdev->dev);
+
+ mutex_init(&rng->lock);
+
rng->dev = &pdev->dev;
rng->clk = devm_clk_get(&pdev->dev, "secss");
if (IS_ERR(rng->clk)) {
@@ -329,9 +332,14 @@ static int __maybe_unused exynos_rng_suspend(struct device *dev)
if (ret)
return ret;
+ mutex_lock(&rng->lock);
+
/* Get new random numbers and store them for seeding on resume. */
exynos_rng_get_random(rng, rng->seed_save, sizeof(rng->seed_save),
&(rng->seed_save_len));
+
+ mutex_unlock(&rng->lock);
+
dev_dbg(rng->dev, "Stored %u bytes for seeding on system resume\n",
rng->seed_save_len);
@@ -354,8 +362,12 @@ static int __maybe_unused exynos_rng_resume(struct device *dev)
if (ret)
return ret;
+ mutex_lock(&rng->lock);
+
ret = exynos_rng_set_seed(rng, rng->seed_save, rng->seed_save_len);
+ mutex_unlock(&rng->lock);
+
clk_disable_unprepare(rng->clk);
return ret;
@@ -367,6 +379,10 @@ static SIMPLE_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_suspend,
static const struct of_device_id exynos_rng_dt_match[] = {
{
.compatible = "samsung,exynos4-rng",
+ .data = (const void *)EXYNOS_PRNG_EXYNOS4,
+ }, {
+ .compatible = "samsung,exynos5250-prng",
+ .data = (const void *)EXYNOS_PRNG_EXYNOS5,
},
{ },
};
@@ -386,4 +402,4 @@ module_platform_driver(exynos_rng_driver);
MODULE_DESCRIPTION("Exynos H/W Random Number Generator driver");
MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
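The exynos-rng hunks above add a second reseed trigger: besides the time-based deadline, the PRNG is reseeded once a byte budget has been consumed, tracked in the new bytes_seeding counter. A minimal sketch of the combined condition (the helper below is illustrative and does not exist in the driver; the field and macro names follow the patch):

	/* Illustrative only: reseed when either the time window or the byte
	 * budget since the last seeding has been exhausted.
	 */
	static bool exynos_rng_needs_reseed(struct exynos_rng_dev *rng,
					    unsigned long next_seeding)
	{
		return time_after_eq(jiffies, next_seeding) ||
		       rng->bytes_seeding >= EXYNOS_RNG_RESEED_BYTES;
	}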
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index e09d4055b19e..a5a36fe7bf2c 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2579,6 +2579,7 @@ err_out_unmap_bars:
for (i = 0; i < 3; ++i)
if (dev->bar[i])
iounmap(dev->bar[i]);
+ kfree(dev);
err_out_free_regions:
pci_release_regions(pdev);
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 89ba9e85c0f3..225e74a7f724 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -108,10 +108,10 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
- priv->base + ctrl);
+ EIP197_PE(priv) + ctrl);
/* Enable access to the program memory */
- writel(prog_en, priv->base + EIP197_PE_ICE_RAM_CTRL);
+ writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
/* Write the firmware */
for (i = 0; i < fw->size / sizeof(u32); i++)
@@ -119,12 +119,12 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
/* Disable access to the program memory */
- writel(0, priv->base + EIP197_PE_ICE_RAM_CTRL);
+ writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
/* Release engine from reset */
- val = readl(priv->base + ctrl);
+ val = readl(EIP197_PE(priv) + ctrl);
val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
- writel(val, priv->base + ctrl);
+ writel(val, EIP197_PE(priv) + ctrl);
}
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
@@ -145,14 +145,14 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
}
/* Clear the scratchpad memory */
- val = readl(priv->base + EIP197_PE_ICE_SCRATCH_CTRL);
+ val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
- writel(val, priv->base + EIP197_PE_ICE_SCRATCH_CTRL);
+ writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
- memset(priv->base + EIP197_PE_ICE_SCRATCH_RAM, 0,
+ memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
@@ -173,7 +173,7 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
u32 hdw, cd_size_rnd, val;
int i;
- hdw = readl(priv->base + EIP197_HIA_OPTIONS);
+ hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
hdw &= GENMASK(27, 25);
hdw >>= 25;
@@ -182,26 +182,25 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
for (i = 0; i < priv->config.rings; i++) {
/* ring base address */
writel(lower_32_bits(priv->ring[i].cdr.base_dma),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
writel(upper_32_bits(priv->ring[i].cdr.base_dma),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
priv->config.cd_size,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DESC_SIZE);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
(EIP197_FETCH_COUNT * priv->config.cd_offset),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
- writel(val,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DMA_CFG);
+ writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
/* clear any pending interrupt */
writel(GENMASK(5, 0),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_STAT);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
}
return 0;
@@ -212,7 +211,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
u32 hdw, rd_size_rnd, val;
int i;
- hdw = readl(priv->base + EIP197_HIA_OPTIONS);
+ hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
hdw &= GENMASK(27, 25);
hdw >>= 25;
@@ -221,33 +220,33 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
for (i = 0; i < priv->config.rings; i++) {
/* ring base address */
writel(lower_32_bits(priv->ring[i].rdr.base_dma),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
writel(upper_32_bits(priv->ring[i].rdr.base_dma),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
priv->config.rd_size,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DESC_SIZE);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
(EIP197_FETCH_COUNT * priv->config.rd_offset),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUG;
writel(val,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DMA_CFG);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
/* clear any pending interrupt */
writel(GENMASK(7, 0),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_STAT);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
/* enable ring interrupt */
- val = readl(priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+ val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
val |= EIP197_RDR_IRQ(i);
- writel(val, priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+ writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
}
return 0;
@@ -259,39 +258,40 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
int i, ret;
/* Determine endianness and configure byte swap */
- version = readl(priv->base + EIP197_HIA_VERSION);
- val = readl(priv->base + EIP197_HIA_MST_CTRL);
+ version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
+ val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
val |= EIP197_MST_CTRL_BYTE_SWAP;
else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
- writel(val, priv->base + EIP197_HIA_MST_CTRL);
-
+ writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
/* Configure wr/rd cache values */
writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
- priv->base + EIP197_MST_CTRL);
+ EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);
/* Interrupts reset */
/* Disable all global interrupts */
- writel(0, priv->base + EIP197_HIA_AIC_G_ENABLE_CTRL);
+ writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);
/* Clear any pending interrupt */
- writel(GENMASK(31, 0), priv->base + EIP197_HIA_AIC_G_ACK);
+ writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
/* Data Fetch Engine configuration */
/* Reset all DFE threads */
writel(EIP197_DxE_THR_CTRL_RESET_PE,
- priv->base + EIP197_HIA_DFE_THR_CTRL);
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
- /* Reset HIA input interface arbiter */
- writel(EIP197_HIA_RA_PE_CTRL_RESET,
- priv->base + EIP197_HIA_RA_PE_CTRL);
+ if (priv->version == EIP197) {
+ /* Reset HIA input interface arbiter */
+ writel(EIP197_HIA_RA_PE_CTRL_RESET,
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
+ }
/* DMA transfer size to use */
val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
@@ -299,29 +299,32 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
- writel(val, priv->base + EIP197_HIA_DFE_CFG);
+ writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG);
/* Leave the DFE threads reset state */
- writel(0, priv->base + EIP197_HIA_DFE_THR_CTRL);
+ writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
/* Configure the processing engine thresholds */
writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
- priv->base + EIP197_PE_IN_DBUF_THRES);
+ EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES);
writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
- priv->base + EIP197_PE_IN_TBUF_THRES);
+ EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES);
- /* enable HIA input interface arbiter and rings */
- writel(EIP197_HIA_RA_PE_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- priv->base + EIP197_HIA_RA_PE_CTRL);
+ if (priv->version == EIP197) {
+ /* enable HIA input interface arbiter and rings */
+ writel(EIP197_HIA_RA_PE_CTRL_EN |
+ GENMASK(priv->config.rings - 1, 0),
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
+ }
/* Data Store Engine configuration */
/* Reset all DSE threads */
writel(EIP197_DxE_THR_CTRL_RESET_PE,
- priv->base + EIP197_HIA_DSE_THR_CTRL);
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
/* Wait for all DSE threads to complete */
- while ((readl(priv->base + EIP197_HIA_DSE_THR_STAT) &
+ while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) &
GENMASK(15, 12)) != GENMASK(15, 12))
;
@@ -330,15 +333,19 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
val |= EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE;
- val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
- writel(val, priv->base + EIP197_HIA_DSE_CFG);
+ /* FIXME: instability issues can occur for EIP97, but disabling it
+ * impacts performance.
+ */
+ if (priv->version == EIP197)
+ val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
+ writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG);
/* Leave the DSE threads reset state */
- writel(0, priv->base + EIP197_HIA_DSE_THR_CTRL);
+ writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
/* Configure the processing engine thresholds */
writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
- priv->base + EIP197_PE_OUT_DBUF_THRES);
+ EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES);
/* Processing Engine configuration */
@@ -348,73 +355,75 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
val |= EIP197_ALG_SHA2;
- writel(val, priv->base + EIP197_PE_EIP96_FUNCTION_EN);
+ writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);
/* Command Descriptor Rings prepare */
for (i = 0; i < priv->config.rings; i++) {
/* Clear interrupts for this ring */
writel(GENMASK(31, 0),
- priv->base + EIP197_HIA_AIC_R_ENABLE_CLR(i));
+ EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));
/* Disable external triggering */
- writel(0, priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);
+ writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Clear the pending prepared counter */
writel(EIP197_xDR_PREP_CLR_COUNT,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
/* Clear the pending processed counter */
writel(EIP197_xDR_PROC_CLR_COUNT,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_COUNT);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
writel(0,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_PNTR);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
writel(0,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_PNTR);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_SIZE);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
/* Result Descriptor Ring prepare */
for (i = 0; i < priv->config.rings; i++) {
/* Disable external triggering */
- writel(0, priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);
+ writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Clear the pending prepared counter */
writel(EIP197_xDR_PREP_CLR_COUNT,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
/* Clear the pending processed counter */
writel(EIP197_xDR_PROC_CLR_COUNT,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_COUNT);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
writel(0,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_PNTR);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
writel(0,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_PNTR);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
/* Ring size */
writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_SIZE);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
/* Enable command descriptor rings */
writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- priv->base + EIP197_HIA_DFE_THR_CTRL);
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
/* Enable result descriptor rings */
writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- priv->base + EIP197_HIA_DSE_THR_CTRL);
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
/* Clear any HIA interrupt */
- writel(GENMASK(30, 20), priv->base + EIP197_HIA_AIC_G_ACK);
+ writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
- eip197_trc_cache_init(priv);
+ if (priv->version == EIP197) {
+ eip197_trc_cache_init(priv);
- ret = eip197_load_firmwares(priv);
- if (ret)
- return ret;
+ ret = eip197_load_firmwares(priv);
+ if (ret)
+ return ret;
+ }
safexcel_hw_setup_cdesc_rings(priv);
safexcel_hw_setup_rdesc_rings(priv);
@@ -422,6 +431,23 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
return 0;
}
+/* Called with ring's lock taken */
+static int safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
+ int ring, int reqs)
+{
+ int coal = min_t(int, reqs, EIP197_MAX_BATCH_SZ);
+
+ if (!coal)
+ return 0;
+
+ /* Configure when we want an interrupt */
+ writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
+ EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
+
+ return coal;
+}
+
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
struct crypto_async_request *req, *backlog;
@@ -429,34 +455,36 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
struct safexcel_request *request;
int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
- priv->ring[ring].need_dequeue = false;
+ /* If a request wasn't properly dequeued because of a lack of resources,
+ * process it first.
+ */
+ req = priv->ring[ring].req;
+ backlog = priv->ring[ring].backlog;
+ if (req)
+ goto handle_req;
- do {
+ while (true) {
spin_lock_bh(&priv->ring[ring].queue_lock);
backlog = crypto_get_backlog(&priv->ring[ring].queue);
req = crypto_dequeue_request(&priv->ring[ring].queue);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!req)
+ if (!req) {
+ priv->ring[ring].req = NULL;
+ priv->ring[ring].backlog = NULL;
goto finalize;
+ }
+handle_req:
request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
- if (!request) {
- spin_lock_bh(&priv->ring[ring].queue_lock);
- crypto_enqueue_request(&priv->ring[ring].queue, req);
- spin_unlock_bh(&priv->ring[ring].queue_lock);
-
- priv->ring[ring].need_dequeue = true;
- goto finalize;
- }
+ if (!request)
+ goto request_failed;
ctx = crypto_tfm_ctx(req->tfm);
ret = ctx->send(req, ring, request, &commands, &results);
if (ret) {
kfree(request);
- req->complete(req, ret);
- priv->ring[ring].need_dequeue = true;
- goto finalize;
+ goto request_failed;
}
if (backlog)
@@ -468,30 +496,39 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
cdesc += commands;
rdesc += results;
- } while (nreq++ < EIP197_MAX_BATCH_SZ);
+ nreq++;
+ }
+
+request_failed:
+ /* Not enough resources to handle all the requests. Bail out and save
+ * the request and the backlog for the next dequeue call (per-ring).
+ */
+ priv->ring[ring].req = req;
+ priv->ring[ring].backlog = backlog;
finalize:
- if (nreq == EIP197_MAX_BATCH_SZ)
- priv->ring[ring].need_dequeue = true;
- else if (!nreq)
+ if (!nreq)
return;
- spin_lock_bh(&priv->ring[ring].lock);
+ spin_lock_bh(&priv->ring[ring].egress_lock);
- /* Configure when we want an interrupt */
- writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
- EIP197_HIA_RDR_THRESH_PROC_PKT(nreq),
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_THRESH);
+ if (!priv->ring[ring].busy) {
+ nreq -= safexcel_try_push_requests(priv, ring, nreq);
+ if (nreq)
+ priv->ring[ring].busy = true;
+ }
+
+ priv->ring[ring].requests_left += nreq;
+
+ spin_unlock_bh(&priv->ring[ring].egress_lock);
/* let the RDR know we have pending descriptors */
writel((rdesc * priv->config.rd_offset) << 2,
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
/* let the CDR know we have pending descriptors */
writel((cdesc * priv->config.cd_offset) << 2,
- priv->base + EIP197_HIA_CDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
-
- spin_unlock_bh(&priv->ring[ring].lock);
+ EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
void safexcel_free_context(struct safexcel_crypto_priv *priv,
@@ -540,7 +577,6 @@ void safexcel_inv_complete(struct crypto_async_request *req, int error)
}
int safexcel_invalidate_cache(struct crypto_async_request *async,
- struct safexcel_context *ctx,
struct safexcel_crypto_priv *priv,
dma_addr_t ctxr_dma, int ring,
struct safexcel_request *request)
@@ -587,14 +623,17 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
{
struct safexcel_request *sreq;
struct safexcel_context *ctx;
- int ret, i, nreq, ndesc = 0;
+ int ret, i, nreq, ndesc, tot_descs, done;
bool should_complete;
- nreq = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);
- nreq >>= 24;
- nreq &= GENMASK(6, 0);
+handle_results:
+ tot_descs = 0;
+
+ nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+ nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
+ nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
if (!nreq)
- return;
+ goto requests_left;
for (i = 0; i < nreq; i++) {
spin_lock_bh(&priv->ring[ring].egress_lock);
@@ -607,14 +646,11 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
ndesc = ctx->handle_result(priv, ring, sreq->req,
&should_complete, &ret);
if (ndesc < 0) {
+ kfree(sreq);
dev_err(priv->dev, "failed to handle result (%d)", ndesc);
- return;
+ goto acknowledge;
}
- writel(EIP197_xDR_PROC_xD_PKT(1) |
- EIP197_xDR_PROC_xD_COUNT(ndesc * priv->config.rd_offset),
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);
-
if (should_complete) {
local_bh_disable();
sreq->req->complete(sreq->req, ret);
@@ -622,19 +658,41 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
}
kfree(sreq);
+ tot_descs += ndesc;
}
+
+acknowledge:
+ if (i) {
+ writel(EIP197_xDR_PROC_xD_PKT(i) |
+ EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+ }
+
+ /* If the number of requests overflowed the counter, try to process more
+ * requests.
+ */
+ if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
+ goto handle_results;
+
+requests_left:
+ spin_lock_bh(&priv->ring[ring].egress_lock);
+
+ done = safexcel_try_push_requests(priv, ring,
+ priv->ring[ring].requests_left);
+
+ priv->ring[ring].requests_left -= done;
+ if (!done && !priv->ring[ring].requests_left)
+ priv->ring[ring].busy = false;
+
+ spin_unlock_bh(&priv->ring[ring].egress_lock);
}
-static void safexcel_handle_result_work(struct work_struct *work)
+static void safexcel_dequeue_work(struct work_struct *work)
{
struct safexcel_work_data *data =
container_of(work, struct safexcel_work_data, work);
- struct safexcel_crypto_priv *priv = data->priv;
-
- safexcel_handle_result_descriptor(priv, data->ring);
- if (priv->ring[data->ring].need_dequeue)
- safexcel_dequeue(data->priv, data->ring);
+ safexcel_dequeue(data->priv, data->ring);
}
struct safexcel_ring_irq_data {
@@ -646,16 +704,16 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
struct safexcel_ring_irq_data *irq_data = data;
struct safexcel_crypto_priv *priv = irq_data->priv;
- int ring = irq_data->ring;
+ int ring = irq_data->ring, rc = IRQ_NONE;
u32 status, stat;
- status = readl(priv->base + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
+ status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
if (!status)
- return IRQ_NONE;
+ return rc;
/* RDR interrupts */
if (status & EIP197_RDR_IRQ(ring)) {
- stat = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);
+ stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
if (unlikely(stat & EIP197_xDR_ERR)) {
/*
@@ -665,22 +723,37 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data)
*/
dev_err(priv->dev, "RDR: fatal error.");
} else if (likely(stat & EIP197_xDR_THRESH)) {
- queue_work(priv->ring[ring].workqueue, &priv->ring[ring].work_data.work);
+ rc = IRQ_WAKE_THREAD;
}
/* ACK the interrupts */
writel(stat & 0xff,
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
}
/* ACK the interrupts */
- writel(status, priv->base + EIP197_HIA_AIC_R_ACK(ring));
+ writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));
+
+ return rc;
+}
+
+static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
+{
+ struct safexcel_ring_irq_data *irq_data = data;
+ struct safexcel_crypto_priv *priv = irq_data->priv;
+ int ring = irq_data->ring;
+
+ safexcel_handle_result_descriptor(priv, ring);
+
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
return IRQ_HANDLED;
}
static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
irq_handler_t handler,
+ irq_handler_t threaded_handler,
struct safexcel_ring_irq_data *ring_irq_priv)
{
int ret, irq = platform_get_irq_byname(pdev, name);
@@ -690,8 +763,9 @@ static int safexcel_request_ring_irq(struct platform_device *pdev, const char *n
return irq;
}
- ret = devm_request_irq(&pdev->dev, irq, handler, 0,
- dev_name(&pdev->dev), ring_irq_priv);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
+ threaded_handler, IRQF_ONESHOT,
+ dev_name(&pdev->dev), ring_irq_priv);
if (ret) {
dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
return ret;
@@ -754,11 +828,11 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
u32 val, mask;
- val = readl(priv->base + EIP197_HIA_OPTIONS);
+ val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
val = (val & GENMASK(27, 25)) >> 25;
mask = BIT(val) - 1;
- val = readl(priv->base + EIP197_HIA_OPTIONS);
+ val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
@@ -768,6 +842,35 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}
+static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
+{
+ struct safexcel_register_offsets *offsets = &priv->offsets;
+
+ if (priv->version == EIP197) {
+ offsets->hia_aic = EIP197_HIA_AIC_BASE;
+ offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
+ offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
+ offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
+ offsets->hia_dfe = EIP197_HIA_DFE_BASE;
+ offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
+ offsets->hia_dse = EIP197_HIA_DSE_BASE;
+ offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
+ offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
+ offsets->pe = EIP197_PE_BASE;
+ } else {
+ offsets->hia_aic = EIP97_HIA_AIC_BASE;
+ offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
+ offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
+ offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
+ offsets->hia_dfe = EIP97_HIA_DFE_BASE;
+ offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
+ offsets->hia_dse = EIP97_HIA_DSE_BASE;
+ offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
+ offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
+ offsets->pe = EIP97_PE_BASE;
+ }
+}
+
static int safexcel_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -780,6 +883,9 @@ static int safexcel_probe(struct platform_device *pdev)
return -ENOMEM;
priv->dev = dev;
+ priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
+
+ safexcel_init_register_offsets(priv);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(dev, res);
@@ -838,6 +944,7 @@ static int safexcel_probe(struct platform_device *pdev)
snprintf(irq_name, 6, "ring%d", i);
irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
+ safexcel_irq_ring_thread,
ring_irq);
if (irq < 0) {
ret = irq;
@@ -846,7 +953,7 @@ static int safexcel_probe(struct platform_device *pdev)
priv->ring[i].work_data.priv = priv;
priv->ring[i].work_data.ring = i;
- INIT_WORK(&priv->ring[i].work_data.work, safexcel_handle_result_work);
+ INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);
snprintf(wq_name, 9, "wq_ring%d", i);
priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
@@ -855,6 +962,9 @@ static int safexcel_probe(struct platform_device *pdev)
goto err_clk;
}
+ priv->ring[i].requests_left = 0;
+ priv->ring[i].busy = false;
+
crypto_init_queue(&priv->ring[i].queue,
EIP197_DEFAULT_RING_SIZE);
@@ -902,7 +1012,14 @@ static int safexcel_remove(struct platform_device *pdev)
}
static const struct of_device_id safexcel_of_match_table[] = {
- { .compatible = "inside-secure,safexcel-eip197" },
+ {
+ .compatible = "inside-secure,safexcel-eip97",
+ .data = (void *)EIP97,
+ },
+ {
+ .compatible = "inside-secure,safexcel-eip197",
+ .data = (void *)EIP197,
+ },
{},
};
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 304c5838c11a..4e219c21608b 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -19,64 +19,103 @@
#define EIP197_HIA_VERSION_BE 0x35ca
/* Static configuration */
-#define EIP197_DEFAULT_RING_SIZE 64
+#define EIP197_DEFAULT_RING_SIZE 400
#define EIP197_MAX_TOKENS 5
#define EIP197_MAX_RINGS 4
#define EIP197_FETCH_COUNT 1
-#define EIP197_MAX_BATCH_SZ EIP197_DEFAULT_RING_SIZE
+#define EIP197_MAX_BATCH_SZ 64
#define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \
GFP_KERNEL : GFP_ATOMIC)
+/* Register base offsets */
+#define EIP197_HIA_AIC(priv) ((priv)->base + (priv)->offsets.hia_aic)
+#define EIP197_HIA_AIC_G(priv) ((priv)->base + (priv)->offsets.hia_aic_g)
+#define EIP197_HIA_AIC_R(priv) ((priv)->base + (priv)->offsets.hia_aic_r)
+#define EIP197_HIA_AIC_xDR(priv) ((priv)->base + (priv)->offsets.hia_aic_xdr)
+#define EIP197_HIA_DFE(priv) ((priv)->base + (priv)->offsets.hia_dfe)
+#define EIP197_HIA_DFE_THR(priv) ((priv)->base + (priv)->offsets.hia_dfe_thr)
+#define EIP197_HIA_DSE(priv) ((priv)->base + (priv)->offsets.hia_dse)
+#define EIP197_HIA_DSE_THR(priv) ((priv)->base + (priv)->offsets.hia_dse_thr)
+#define EIP197_HIA_GEN_CFG(priv) ((priv)->base + (priv)->offsets.hia_gen_cfg)
+#define EIP197_PE(priv) ((priv)->base + (priv)->offsets.pe)
+
+/* EIP197 base offsets */
+#define EIP197_HIA_AIC_BASE 0x90000
+#define EIP197_HIA_AIC_G_BASE 0x90000
+#define EIP197_HIA_AIC_R_BASE 0x90800
+#define EIP197_HIA_AIC_xDR_BASE 0x80000
+#define EIP197_HIA_DFE_BASE 0x8c000
+#define EIP197_HIA_DFE_THR_BASE 0x8c040
+#define EIP197_HIA_DSE_BASE 0x8d000
+#define EIP197_HIA_DSE_THR_BASE 0x8d040
+#define EIP197_HIA_GEN_CFG_BASE 0xf0000
+#define EIP197_PE_BASE 0xa0000
+
+/* EIP97 base offsets */
+#define EIP97_HIA_AIC_BASE 0x0
+#define EIP97_HIA_AIC_G_BASE 0x0
+#define EIP97_HIA_AIC_R_BASE 0x0
+#define EIP97_HIA_AIC_xDR_BASE 0x0
+#define EIP97_HIA_DFE_BASE 0xf000
+#define EIP97_HIA_DFE_THR_BASE 0xf200
+#define EIP97_HIA_DSE_BASE 0xf400
+#define EIP97_HIA_DSE_THR_BASE 0xf600
+#define EIP97_HIA_GEN_CFG_BASE 0x10000
+#define EIP97_PE_BASE 0x10000
+
/* CDR/RDR register offsets */
-#define EIP197_HIA_xDR_OFF(r) (0x80000 + (r) * 0x1000)
-#define EIP197_HIA_CDR(r) (EIP197_HIA_xDR_OFF(r))
-#define EIP197_HIA_RDR(r) (EIP197_HIA_xDR_OFF(r) + 0x800)
-#define EIP197_HIA_xDR_RING_BASE_ADDR_LO 0x0
-#define EIP197_HIA_xDR_RING_BASE_ADDR_HI 0x4
-#define EIP197_HIA_xDR_RING_SIZE 0x18
-#define EIP197_HIA_xDR_DESC_SIZE 0x1c
-#define EIP197_HIA_xDR_CFG 0x20
-#define EIP197_HIA_xDR_DMA_CFG 0x24
-#define EIP197_HIA_xDR_THRESH 0x28
-#define EIP197_HIA_xDR_PREP_COUNT 0x2c
-#define EIP197_HIA_xDR_PROC_COUNT 0x30
-#define EIP197_HIA_xDR_PREP_PNTR 0x34
-#define EIP197_HIA_xDR_PROC_PNTR 0x38
-#define EIP197_HIA_xDR_STAT 0x3c
+#define EIP197_HIA_xDR_OFF(priv, r) (EIP197_HIA_AIC_xDR(priv) + (r) * 0x1000)
+#define EIP197_HIA_CDR(priv, r) (EIP197_HIA_xDR_OFF(priv, r))
+#define EIP197_HIA_RDR(priv, r) (EIP197_HIA_xDR_OFF(priv, r) + 0x800)
+#define EIP197_HIA_xDR_RING_BASE_ADDR_LO 0x0000
+#define EIP197_HIA_xDR_RING_BASE_ADDR_HI 0x0004
+#define EIP197_HIA_xDR_RING_SIZE 0x0018
+#define EIP197_HIA_xDR_DESC_SIZE 0x001c
+#define EIP197_HIA_xDR_CFG 0x0020
+#define EIP197_HIA_xDR_DMA_CFG 0x0024
+#define EIP197_HIA_xDR_THRESH 0x0028
+#define EIP197_HIA_xDR_PREP_COUNT 0x002c
+#define EIP197_HIA_xDR_PROC_COUNT 0x0030
+#define EIP197_HIA_xDR_PREP_PNTR 0x0034
+#define EIP197_HIA_xDR_PROC_PNTR 0x0038
+#define EIP197_HIA_xDR_STAT 0x003c
/* register offsets */
-#define EIP197_HIA_DFE_CFG 0x8c000
-#define EIP197_HIA_DFE_THR_CTRL 0x8c040
-#define EIP197_HIA_DFE_THR_STAT 0x8c044
-#define EIP197_HIA_DSE_CFG 0x8d000
-#define EIP197_HIA_DSE_THR_CTRL 0x8d040
-#define EIP197_HIA_DSE_THR_STAT 0x8d044
-#define EIP197_HIA_RA_PE_CTRL 0x90010
-#define EIP197_HIA_RA_PE_STAT 0x90014
+#define EIP197_HIA_DFE_CFG 0x0000
+#define EIP197_HIA_DFE_THR_CTRL 0x0000
+#define EIP197_HIA_DFE_THR_STAT 0x0004
+#define EIP197_HIA_DSE_CFG 0x0000
+#define EIP197_HIA_DSE_THR_CTRL 0x0000
+#define EIP197_HIA_DSE_THR_STAT 0x0004
+#define EIP197_HIA_RA_PE_CTRL 0x0010
+#define EIP197_HIA_RA_PE_STAT 0x0014
#define EIP197_HIA_AIC_R_OFF(r) ((r) * 0x1000)
-#define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0x9e808 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0x9e810 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ACK(r) (0x9e810 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0x9e814 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_G_ENABLE_CTRL 0x9f808
-#define EIP197_HIA_AIC_G_ENABLED_STAT 0x9f810
-#define EIP197_HIA_AIC_G_ACK 0x9f810
-#define EIP197_HIA_MST_CTRL 0x9fff4
-#define EIP197_HIA_OPTIONS 0x9fff8
-#define EIP197_HIA_VERSION 0x9fffc
-#define EIP197_PE_IN_DBUF_THRES 0xa0000
-#define EIP197_PE_IN_TBUF_THRES 0xa0100
-#define EIP197_PE_ICE_SCRATCH_RAM 0xa0800
-#define EIP197_PE_ICE_PUE_CTRL 0xa0c80
-#define EIP197_PE_ICE_SCRATCH_CTRL 0xa0d04
-#define EIP197_PE_ICE_FPP_CTRL 0xa0d80
-#define EIP197_PE_ICE_RAM_CTRL 0xa0ff0
-#define EIP197_PE_EIP96_FUNCTION_EN 0xa1004
-#define EIP197_PE_EIP96_CONTEXT_CTRL 0xa1008
-#define EIP197_PE_EIP96_CONTEXT_STAT 0xa100c
-#define EIP197_PE_OUT_DBUF_THRES 0xa1c00
-#define EIP197_PE_OUT_TBUF_THRES 0xa1d00
+#define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0xe008 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ACK(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0xe014 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_G_ENABLE_CTRL 0xf808
+#define EIP197_HIA_AIC_G_ENABLED_STAT 0xf810
+#define EIP197_HIA_AIC_G_ACK 0xf810
+#define EIP197_HIA_MST_CTRL 0xfff4
+#define EIP197_HIA_OPTIONS 0xfff8
+#define EIP197_HIA_VERSION 0xfffc
+#define EIP197_PE_IN_DBUF_THRES 0x0000
+#define EIP197_PE_IN_TBUF_THRES 0x0100
+#define EIP197_PE_ICE_SCRATCH_RAM 0x0800
+#define EIP197_PE_ICE_PUE_CTRL 0x0c80
+#define EIP197_PE_ICE_SCRATCH_CTRL 0x0d04
+#define EIP197_PE_ICE_FPP_CTRL 0x0d80
+#define EIP197_PE_ICE_RAM_CTRL 0x0ff0
+#define EIP197_PE_EIP96_FUNCTION_EN 0x1004
+#define EIP197_PE_EIP96_CONTEXT_CTRL 0x1008
+#define EIP197_PE_EIP96_CONTEXT_STAT 0x100c
+#define EIP197_PE_OUT_DBUF_THRES 0x1c00
+#define EIP197_PE_OUT_TBUF_THRES 0x1d00
+#define EIP197_MST_CTRL 0xfff4
+
+/* EIP197-specific registers, no indirection */
#define EIP197_CLASSIFICATION_RAMS 0xe0000
#define EIP197_TRC_CTRL 0xf0800
#define EIP197_TRC_LASTRES 0xf0804
@@ -90,7 +129,6 @@
#define EIP197_TRC_ECCDATASTAT 0xf083c
#define EIP197_TRC_ECCDATA 0xf0840
#define EIP197_CS_RAM_CTRL 0xf7ff0
-#define EIP197_MST_CTRL 0xffff4
/* EIP197_HIA_xDR_DESC_SIZE */
#define EIP197_xDR_DESC_MODE_64BIT BIT(31)
@@ -117,6 +155,8 @@
#define EIP197_xDR_PREP_CLR_COUNT BIT(31)
/* EIP197_HIA_xDR_PROC_COUNT */
+#define EIP197_xDR_PROC_xD_PKT_OFFSET 24
+#define EIP197_xDR_PROC_xD_PKT_MASK GENMASK(6, 0)
#define EIP197_xDR_PROC_xD_COUNT(n) ((n) << 2)
#define EIP197_xDR_PROC_xD_PKT(n) ((n) << 24)
#define EIP197_xDR_PROC_CLR_COUNT BIT(31)
@@ -463,12 +503,33 @@ struct safexcel_work_data {
int ring;
};
+enum safexcel_eip_version {
+ EIP97,
+ EIP197,
+};
+
+struct safexcel_register_offsets {
+ u32 hia_aic;
+ u32 hia_aic_g;
+ u32 hia_aic_r;
+ u32 hia_aic_xdr;
+ u32 hia_dfe;
+ u32 hia_dfe_thr;
+ u32 hia_dse;
+ u32 hia_dse_thr;
+ u32 hia_gen_cfg;
+ u32 pe;
+};
+
struct safexcel_crypto_priv {
void __iomem *base;
struct device *dev;
struct clk *clk;
struct safexcel_config config;
+ enum safexcel_eip_version version;
+ struct safexcel_register_offsets offsets;
+
/* context DMA pool */
struct dma_pool *context_pool;
@@ -489,7 +550,20 @@ struct safexcel_crypto_priv {
/* queue */
struct crypto_queue queue;
spinlock_t queue_lock;
- bool need_dequeue;
+
+ /* Number of requests in the engine that need the threshold
+ * interrupt to be set up.
+ */
+ int requests_left;
+
+ /* The ring is currently handling at least one request */
+ bool busy;
+
+ /* Store for current requests when bailing out of the dequeueing
+ * function because not enough resources are available.
+ */
+ struct crypto_async_request *req;
+ struct crypto_async_request *backlog;
} ring[EIP197_MAX_RINGS];
};
@@ -539,7 +613,6 @@ void safexcel_free_context(struct safexcel_crypto_priv *priv,
struct crypto_async_request *req,
int result_sz);
int safexcel_invalidate_cache(struct crypto_async_request *async,
- struct safexcel_context *ctx,
struct safexcel_crypto_priv *priv,
dma_addr_t ctxr_dma, int ring,
struct safexcel_request *request);
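The header rework above turns every register constant into an offset relative to a per-block base, with the bases picked at probe time according to the EIP variant. As a rough sketch of the indirection, an access that used to be an absolute EIP197 address now decomposes into base + offset — the old 0xa1004 for the EIP96 function-enable register becomes EIP197_PE_BASE (0xa0000) plus EIP197_PE_EIP96_FUNCTION_EN (0x1004) — and the EIP97 path simply plugs in different bases:

	/* Sketch: this is what EIP197_PE(priv) expands to. */
	void __iomem *pe = priv->base + priv->offsets.pe;	/* 0xa0000 on EIP197, 0x10000 on EIP97 */

	writel(val, pe + EIP197_PE_EIP96_FUNCTION_EN);		/* offset 0x1004 within the PE block */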
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 5438552bc6d7..63a8768ed2ae 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -14,6 +14,7 @@
#include <crypto/aes.h>
#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
#include "safexcel.h"
@@ -26,13 +27,17 @@ struct safexcel_cipher_ctx {
struct safexcel_context base;
struct safexcel_crypto_priv *priv;
- enum safexcel_cipher_direction direction;
u32 mode;
__le32 key[8];
unsigned int key_len;
};
+struct safexcel_cipher_req {
+ enum safexcel_cipher_direction direction;
+ bool needs_inv;
+};
+
static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
struct crypto_async_request *async,
struct safexcel_command_desc *cdesc,
@@ -64,6 +69,7 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
struct crypto_aes_ctx aes;
int ret, i;
@@ -73,10 +79,12 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
return ret;
}
- for (i = 0; i < len / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
- ctx->base.needs_inv = true;
- break;
+ if (priv->version == EIP197 && ctx->base.ctxr_dma) {
+ for (i = 0; i < len / sizeof(u32); i++) {
+ if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ ctx->base.needs_inv = true;
+ break;
+ }
}
}
@@ -90,12 +98,15 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
}
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
+ struct crypto_async_request *async,
struct safexcel_command_desc *cdesc)
{
struct safexcel_crypto_priv *priv = ctx->priv;
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
int ctrl_size;
- if (ctx->direction == SAFEXCEL_ENCRYPT)
+ if (sreq->direction == SAFEXCEL_ENCRYPT)
cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
else
cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;
@@ -126,9 +137,9 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
return 0;
}
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
- struct crypto_async_request *async,
- bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
{
struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_result_desc *rdesc;
@@ -238,7 +249,7 @@ static int safexcel_aes_send(struct crypto_async_request *async,
n_cdesc++;
if (n_cdesc == 1) {
- safexcel_context_control(ctx, cdesc);
+ safexcel_context_control(ctx, async, cdesc);
safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
}
@@ -265,7 +276,6 @@ static int safexcel_aes_send(struct crypto_async_request *async,
spin_unlock_bh(&priv->ring[ring].egress_lock);
request->req = &req->base;
- ctx->base.handle_result = safexcel_handle_result;
*commands = n_cdesc;
*results = n_rdesc;
@@ -341,8 +351,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
ring = safexcel_select_ring(priv);
ctx->base.ring = ring;
- ctx->base.needs_inv = false;
- ctx->base.send = safexcel_aes_send;
spin_lock_bh(&priv->ring[ring].queue_lock);
enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -351,14 +359,34 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
if (enq_ret != -EINPROGRESS)
*ret = enq_ret;
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
*should_complete = false;
return ndesc;
}
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
+{
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+ int err;
+
+ if (sreq->needs_inv) {
+ sreq->needs_inv = false;
+ err = safexcel_handle_inv_result(priv, ring, async,
+ should_complete, ret);
+ } else {
+ err = safexcel_handle_req_result(priv, ring, async,
+ should_complete, ret);
+ }
+
+ return err;
+}
+
static int safexcel_cipher_send_inv(struct crypto_async_request *async,
int ring, struct safexcel_request *request,
int *commands, int *results)
@@ -368,9 +396,7 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- ctx->base.handle_result = safexcel_handle_inv_result;
-
- ret = safexcel_invalidate_cache(async, &ctx->base, priv,
+ ret = safexcel_invalidate_cache(async, priv,
ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
return ret;
@@ -381,32 +407,54 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
return 0;
}
+static int safexcel_send(struct crypto_async_request *async,
+ int ring, struct safexcel_request *request,
+ int *commands, int *results)
+{
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+ struct safexcel_crypto_priv *priv = ctx->priv;
+ int ret;
+
+ BUG_ON(priv->version == EIP97 && sreq->needs_inv);
+
+ if (sreq->needs_inv)
+ ret = safexcel_cipher_send_inv(async, ring, request,
+ commands, results);
+ else
+ ret = safexcel_aes_send(async, ring, request,
+ commands, results);
+ return ret;
+}
+
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
- struct skcipher_request req;
+ SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
- memset(&req, 0, sizeof(struct skcipher_request));
+ memset(req, 0, sizeof(struct skcipher_request));
/* create invalidation request */
init_completion(&result.completion);
- skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- safexcel_inv_complete, &result);
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ safexcel_inv_complete, &result);
- skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
- ctx = crypto_tfm_ctx(req.base.tfm);
+ skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+ ctx = crypto_tfm_ctx(req->base.tfm);
ctx->base.exit_inv = true;
- ctx->base.send = safexcel_cipher_send_inv;
+ sreq->needs_inv = true;
spin_lock_bh(&priv->ring[ring].queue_lock);
- crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+ crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
wait_for_completion_interruptible(&result.completion);
@@ -424,19 +472,21 @@ static int safexcel_aes(struct skcipher_request *req,
enum safexcel_cipher_direction dir, u32 mode)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
struct safexcel_crypto_priv *priv = ctx->priv;
int ret, ring;
- ctx->direction = dir;
+ sreq->needs_inv = false;
+ sreq->direction = dir;
ctx->mode = mode;
if (ctx->base.ctxr) {
- if (ctx->base.needs_inv)
- ctx->base.send = safexcel_cipher_send_inv;
+ if (priv->version == EIP197 && ctx->base.needs_inv) {
+ sreq->needs_inv = true;
+ ctx->base.needs_inv = false;
+ }
} else {
ctx->base.ring = safexcel_select_ring(priv);
- ctx->base.send = safexcel_aes_send;
-
ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
EIP197_GFP_FLAGS(req->base),
&ctx->base.ctxr_dma);
@@ -450,8 +500,8 @@ static int safexcel_aes(struct skcipher_request *req,
ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
return ret;
}
@@ -476,6 +526,11 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
alg.skcipher.base);
ctx->priv = tmpl->priv;
+ ctx->base.send = safexcel_send;
+ ctx->base.handle_result = safexcel_handle_result;
+
+ crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+ sizeof(struct safexcel_cipher_req));
return 0;
}
@@ -494,9 +549,14 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));
- ret = safexcel_cipher_exit_inv(tfm);
- if (ret)
- dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
+ if (priv->version == EIP197) {
+ ret = safexcel_cipher_exit_inv(tfm);
+ if (ret)
+ dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
+ } else {
+ dma_pool_free(priv->context_pool, ctx->base.ctxr,
+ ctx->base.ctxr_dma);
+ }
}
struct safexcel_alg_template safexcel_alg_ecb_aes = {
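The invalidation path above replaces a bare struct skcipher_request on the stack with SKCIPHER_REQUEST_ON_STACK(), which also reserves the per-request context declared through crypto_skcipher_set_reqsize(), so skcipher_request_ctx() is valid for the on-stack request and needs_inv can be carried per request. A hedged sketch of the pattern, assuming tfm is already a struct crypto_skcipher * (the driver obtains it via __crypto_skcipher_cast()) and result is the driver's completion holder:

	SKCIPHER_REQUEST_ON_STACK(req, tfm);	/* request plus reqsize context on the stack */
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      safexcel_inv_complete, &result);
	sreq->needs_inv = true;			/* routed to safexcel_cipher_send_inv() by safexcel_send() */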
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 3980f946874f..122a2a58e98f 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -14,7 +14,6 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
-
#include "safexcel.h"
struct safexcel_ahash_ctx {
@@ -32,9 +31,12 @@ struct safexcel_ahash_req {
bool last_req;
bool finish;
bool hmac;
+ bool needs_inv;
+
+ int nents;
u8 state_sz; /* expected state size, only set once */
- u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
+ u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
u64 len;
u64 processed;
@@ -119,15 +121,15 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
}
}
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
- struct crypto_async_request *async,
- bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
{
struct safexcel_result_desc *rdesc;
struct ahash_request *areq = ahash_request_cast(async);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
- int cache_len, result_sz = sreq->state_sz;
+ int cache_len;
*ret = 0;
@@ -148,11 +150,13 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
spin_unlock_bh(&priv->ring[ring].egress_lock);
if (sreq->finish)
- result_sz = crypto_ahash_digestsize(ahash);
- memcpy(sreq->state, areq->result, result_sz);
+ memcpy(areq->result, sreq->state,
+ crypto_ahash_digestsize(ahash));
- dma_unmap_sg(priv->dev, areq->src,
- sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
+ if (sreq->nents) {
+ dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+ sreq->nents = 0;
+ }
safexcel_free_context(priv, async, sreq->state_sz);
@@ -165,9 +169,9 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
return 1;
}
-static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
- struct safexcel_request *request, int *commands,
- int *results)
+static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
+ struct safexcel_request *request,
+ int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
@@ -177,7 +181,7 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
- int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+ int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
queued = len = req->len - req->processed;
if (queued < crypto_ahash_blocksize(ahash))
@@ -185,17 +189,31 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
else
cache_len = queued - areq->nbytes;
- /*
- * If this is not the last request and the queued data does not fit
- * into full blocks, cache it for the next send() call.
- */
- extra = queued & (crypto_ahash_blocksize(ahash) - 1);
- if (!req->last_req && extra) {
- sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
- req->cache_next, extra, areq->nbytes - extra);
-
- queued -= extra;
- len -= extra;
+ if (!req->last_req) {
+ /* If this is not the last request and the queued data does not
+ * fit into full blocks, cache it for the next send() call.
+ */
+ extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+ if (!extra)
+ /* If this is not the last request and the queued data
+ * is a multiple of a block, cache the last one for now.
+ */
+ extra = queued - crypto_ahash_blocksize(ahash);
+
+ if (extra) {
+ sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+ req->cache_next, extra,
+ areq->nbytes - extra);
+
+ queued -= extra;
+ len -= extra;
+
+ if (!queued) {
+ *commands = 0;
+ *results = 0;
+ return 0;
+ }
+ }
}
spin_lock_bh(&priv->ring[ring].egress_lock);
@@ -233,15 +251,15 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
}
/* Now handle the current ahash request buffer(s) */
- nents = dma_map_sg(priv->dev, areq->src,
- sg_nents_for_len(areq->src, areq->nbytes),
- DMA_TO_DEVICE);
- if (!nents) {
+ req->nents = dma_map_sg(priv->dev, areq->src,
+ sg_nents_for_len(areq->src, areq->nbytes),
+ DMA_TO_DEVICE);
+ if (!req->nents) {
ret = -ENOMEM;
goto cdesc_rollback;
}
- for_each_sg(areq->src, sg, nents, i) {
+ for_each_sg(areq->src, sg, req->nents, i) {
int sglen = sg_dma_len(sg);
/* Do not overflow the request */
@@ -273,7 +291,7 @@ send_command:
/* Add the token */
safexcel_hash_token(first_cdesc, len, req->state_sz);
- ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
+ ctx->base.result_dma = dma_map_single(priv->dev, req->state,
req->state_sz, DMA_FROM_DEVICE);
if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
ret = -EINVAL;
@@ -292,7 +310,6 @@ send_command:
req->processed += len;
request->req = &areq->base;
- ctx->base.handle_result = safexcel_handle_result;
*commands = n_cdesc;
*results = 1;
@@ -308,10 +325,8 @@ unmap_cache:
ctx->base.cache_sz = 0;
}
free_cache:
- if (ctx->base.cache) {
- kfree(ctx->base.cache);
- ctx->base.cache = NULL;
- }
+ kfree(ctx->base.cache);
+ ctx->base.cache = NULL;
unlock:
spin_unlock_bh(&priv->ring[ring].egress_lock);
@@ -376,8 +391,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
ring = safexcel_select_ring(priv);
ctx->base.ring = ring;
- ctx->base.needs_inv = false;
- ctx->base.send = safexcel_ahash_send;
spin_lock_bh(&priv->ring[ring].queue_lock);
enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -386,14 +399,36 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
if (enq_ret != -EINPROGRESS)
*ret = enq_ret;
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
*should_complete = false;
return 1;
}
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
+{
+ struct ahash_request *areq = ahash_request_cast(async);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ int err;
+
+ BUG_ON(priv->version == EIP97 && req->needs_inv);
+
+ if (req->needs_inv) {
+ req->needs_inv = false;
+ err = safexcel_handle_inv_result(priv, ring, async,
+ should_complete, ret);
+ } else {
+ err = safexcel_handle_req_result(priv, ring, async,
+ should_complete, ret);
+ }
+
+ return err;
+}
+
static int safexcel_ahash_send_inv(struct crypto_async_request *async,
int ring, struct safexcel_request *request,
int *commands, int *results)
@@ -402,8 +437,7 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
int ret;
- ctx->base.handle_result = safexcel_handle_inv_result;
- ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
+ ret = safexcel_invalidate_cache(async, ctx->priv,
ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
return ret;
@@ -414,32 +448,50 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
return 0;
}
+static int safexcel_ahash_send(struct crypto_async_request *async,
+ int ring, struct safexcel_request *request,
+ int *commands, int *results)
+{
+ struct ahash_request *areq = ahash_request_cast(async);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ int ret;
+
+ if (req->needs_inv)
+ ret = safexcel_ahash_send_inv(async, ring, request,
+ commands, results);
+ else
+ ret = safexcel_ahash_send_req(async, ring, request,
+ commands, results);
+ return ret;
+}
+
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
- struct ahash_request req;
+ AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
+ struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
- memset(&req, 0, sizeof(struct ahash_request));
+ memset(req, 0, sizeof(struct ahash_request));
/* create invalidation request */
init_completion(&result.completion);
- ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
safexcel_inv_complete, &result);
- ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
- ctx = crypto_tfm_ctx(req.base.tfm);
+ ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
+ ctx = crypto_tfm_ctx(req->base.tfm);
ctx->base.exit_inv = true;
- ctx->base.send = safexcel_ahash_send_inv;
+ rctx->needs_inv = true;
spin_lock_bh(&priv->ring[ring].queue_lock);
- crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+ crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
wait_for_completion_interruptible(&result.completion);
@@ -452,13 +504,23 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
return 0;
}
+/* safexcel_ahash_cache: cache data until at least one request can be sent to
+ * the engine, i.e. when there is at least one full block of data in the pipe.
+ */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
int queued, cache_len;
+ /* cache_len: everything accepted by the driver but not sent yet,
+ * tot sz handled by update() - last req sz - tot sz handled by send()
+ */
cache_len = req->len - areq->nbytes - req->processed;
+ /* queued: everything accepted by the driver which will be handled by
+ * the next send() calls.
+ * tot sz handled by update() - tot sz handled by send()
+ */
queued = req->len - req->processed;
/*
@@ -472,7 +534,7 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
return areq->nbytes;
}
- /* We could'nt cache all the data */
+ /* We couldn't cache all the data */
return -E2BIG;
}
@@ -483,14 +545,23 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
struct safexcel_crypto_priv *priv = ctx->priv;
int ret, ring;
- ctx->base.send = safexcel_ahash_send;
-
- if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
- ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
+ req->needs_inv = false;
if (ctx->base.ctxr) {
- if (ctx->base.needs_inv)
- ctx->base.send = safexcel_ahash_send_inv;
+ if (priv->version == EIP197 &&
+ !ctx->base.needs_inv && req->processed &&
+ ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
+ /* We're still setting needs_inv here, even though it is
+ * cleared right away, because the needs_inv flag can be
+ * set in other functions and we want to keep the same
+ * logic.
+ */
+ ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
+
+ if (ctx->base.needs_inv) {
+ ctx->base.needs_inv = false;
+ req->needs_inv = true;
+ }
} else {
ctx->base.ring = safexcel_select_ring(priv);
ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
@@ -506,8 +577,8 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
return ret;
}
@@ -590,7 +661,6 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
export->processed = req->processed;
memcpy(export->state, req->state, req->state_sz);
- memset(export->cache, 0, crypto_ahash_blocksize(ahash));
memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
return 0;
@@ -624,6 +694,8 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
struct safexcel_alg_template, alg.ahash);
ctx->priv = tmpl->priv;
+ ctx->base.send = safexcel_ahash_send;
+ ctx->base.handle_result = safexcel_handle_result;
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct safexcel_ahash_req));
@@ -670,9 +742,14 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
if (!ctx->base.ctxr)
return;
- ret = safexcel_ahash_exit_inv(tfm);
- if (ret)
- dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
+ if (priv->version == EIP197) {
+ ret = safexcel_ahash_exit_inv(tfm);
+ if (ret)
+ dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
+ } else {
+ dma_pool_free(priv->context_pool, ctx->base.ctxr,
+ ctx->base.ctxr_dma);
+ }
}
struct safexcel_alg_template safexcel_alg_sha1 = {
@@ -811,7 +888,7 @@ static int safexcel_hmac_init_iv(struct ahash_request *areq,
req->last_req = true;
ret = crypto_ahash_update(areq);
- if (ret && ret != -EINPROGRESS)
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
return ret;
wait_for_completion_interruptible(&result.completion);
@@ -876,6 +953,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct safexcel_crypto_priv *priv = ctx->priv;
struct safexcel_ahash_export_state istate, ostate;
int ret, i;
@@ -883,11 +961,13 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
if (ret)
return ret;
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
- if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
- ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
- ctx->base.needs_inv = true;
- break;
+ if (priv->version == EIP197 && ctx->base.ctxr) {
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
+ if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
+ ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
+ ctx->base.needs_inv = true;
+ break;
+ }
}
}
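
Context for the safexcel hunks above that replace the need_dequeue test with queue_work(): a minimal, hedged sketch of the enqueue-then-kick pattern, using a hypothetical my_ring structure rather than the driver's own types.

#include <crypto/algapi.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_ring {
	spinlock_t queue_lock;
	struct crypto_queue queue;
	struct workqueue_struct *workqueue;
	struct work_struct work;
};

static int my_enqueue(struct my_ring *ring, struct crypto_async_request *base)
{
	int ret;

	spin_lock_bh(&ring->queue_lock);
	ret = crypto_enqueue_request(&ring->queue, base);
	spin_unlock_bh(&ring->queue_lock);

	/* Kick the per-ring worker; it dequeues and submits to the engine. */
	queue_work(ring->workqueue, &ring->work);

	return ret;
}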
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index dadc4a808df5..717a26607bdb 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -260,12 +260,11 @@ static int setup_crypt_desc(void)
{
struct device *dev = &pdev->dev;
BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
- crypt_virt = dma_alloc_coherent(dev,
- NPE_QLEN * sizeof(struct crypt_ctl),
- &crypt_phys, GFP_ATOMIC);
+ crypt_virt = dma_zalloc_coherent(dev,
+ NPE_QLEN * sizeof(struct crypt_ctl),
+ &crypt_phys, GFP_ATOMIC);
if (!crypt_virt)
return -ENOMEM;
- memset(crypt_virt, 0, NPE_QLEN * sizeof(struct crypt_ctl));
return 0;
}
@@ -534,7 +533,6 @@ static void release_ixp_crypto(struct device *dev)
NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
crypt_virt, crypt_phys);
}
- return;
}
static void reset_sa_dir(struct ix_sa_dir *dir)
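
A note on the ixp4xx hunk above: dma_zalloc_coherent() is the zero-initialising variant of dma_alloc_coherent(), which is why the explicit memset() goes away. An illustrative helper only; the device, size and GFP flags here are placeholders.

#include <linux/dma-mapping.h>

static void *alloc_zeroed_desc_ring(struct device *dev, size_t size,
				    dma_addr_t *phys)
{
	/* Returns a zeroed, coherent DMA buffer, or NULL on failure. */
	return dma_zalloc_coherent(dev, size, phys, GFP_ATOMIC);
}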
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 6e7a5c77a00a..aca2373fa1de 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -15,6 +15,7 @@
*/
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -24,6 +25,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -34,10 +36,6 @@
/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128
-static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA);
-module_param_named(allhwsupport, allhwsupport, int, 0444);
-MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if overlaps with the mv_cesa driver)");
-
struct mv_cesa_dev *cesa_dev;
struct crypto_async_request *
@@ -76,8 +74,6 @@ static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
ctx = crypto_tfm_ctx(req->tfm);
ctx->ops->step(req);
-
- return;
}
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
@@ -183,8 +179,7 @@ int mv_cesa_queue_req(struct crypto_async_request *req,
spin_lock_bh(&engine->lock);
ret = crypto_enqueue_request(&engine->queue, req);
if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
- (ret == -EINPROGRESS ||
- (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ (ret == -EINPROGRESS || ret == -EBUSY))
mv_cesa_tdma_chain(engine, creq);
spin_unlock_bh(&engine->lock);
@@ -202,7 +197,7 @@ static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
int i, j;
for (i = 0; i < cesa->caps->ncipher_algs; i++) {
- ret = crypto_register_alg(cesa->caps->cipher_algs[i]);
+ ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]);
if (ret)
goto err_unregister_crypto;
}
@@ -222,7 +217,7 @@ err_unregister_ahash:
err_unregister_crypto:
for (j = 0; j < i; j++)
- crypto_unregister_alg(cesa->caps->cipher_algs[j]);
+ crypto_unregister_skcipher(cesa->caps->cipher_algs[j]);
return ret;
}
@@ -235,10 +230,10 @@ static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
crypto_unregister_ahash(cesa->caps->ahash_algs[i]);
for (i = 0; i < cesa->caps->ncipher_algs; i++)
- crypto_unregister_alg(cesa->caps->cipher_algs[i]);
+ crypto_unregister_skcipher(cesa->caps->cipher_algs[i]);
}
-static struct crypto_alg *orion_cipher_algs[] = {
+static struct skcipher_alg *orion_cipher_algs[] = {
&mv_cesa_ecb_des_alg,
&mv_cesa_cbc_des_alg,
&mv_cesa_ecb_des3_ede_alg,
@@ -254,7 +249,7 @@ static struct ahash_alg *orion_ahash_algs[] = {
&mv_ahmac_sha1_alg,
};
-static struct crypto_alg *armada_370_cipher_algs[] = {
+static struct skcipher_alg *armada_370_cipher_algs[] = {
&mv_cesa_ecb_des_alg,
&mv_cesa_cbc_des_alg,
&mv_cesa_ecb_des3_ede_alg,
@@ -416,8 +411,11 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
if (IS_ERR(engine->sram))
return PTR_ERR(engine->sram);
- engine->sram_dma = phys_to_dma(cesa->dev,
- (phys_addr_t)res->start);
+ engine->sram_dma = dma_map_resource(cesa->dev, res->start,
+ cesa->sram_size,
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(cesa->dev, engine->sram_dma))
+ return -ENOMEM;
return 0;
}
@@ -427,11 +425,12 @@ static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
struct mv_cesa_engine *engine = &cesa->engines[idx];
- if (!engine->pool)
- return;
-
- gen_pool_free(engine->pool, (unsigned long)engine->sram,
- cesa->sram_size);
+ if (engine->pool)
+ gen_pool_free(engine->pool, (unsigned long)engine->sram,
+ cesa->sram_size);
+ else
+ dma_unmap_resource(cesa->dev, engine->sram_dma,
+ cesa->sram_size, DMA_BIDIRECTIONAL, 0);
}
static int mv_cesa_probe(struct platform_device *pdev)
@@ -459,9 +458,6 @@ static int mv_cesa_probe(struct platform_device *pdev)
caps = match->data;
}
- if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport)
- return -ENOTSUPP;
-
cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
if (!cesa)
return -ENOMEM;
@@ -599,9 +595,16 @@ static int mv_cesa_remove(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id mv_cesa_plat_id_table[] = {
+ { .name = "mv_crypto" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);
+
static struct platform_driver marvell_cesa = {
.probe = mv_cesa_probe,
.remove = mv_cesa_remove,
+ .id_table = mv_cesa_plat_id_table,
.driver = {
.name = "marvell-cesa",
.of_match_table = mv_cesa_of_match_table,
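
For reference on the SRAM change in cesa.c above: phys_to_dma() is replaced by the generic dma_map_resource()/dma_unmap_resource() pair. A hedged sketch of that pairing, with placeholder names:

#include <linux/dma-mapping.h>

static int sram_map(struct device *dev, phys_addr_t phys, size_t size,
		    dma_addr_t *out)
{
	dma_addr_t dma;

	/* Map MMIO/SRAM physical space for device DMA. */
	dma = dma_map_resource(dev, phys, size, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	*out = dma;
	return 0;
}

static void sram_unmap(struct device *dev, dma_addr_t dma, size_t size)
{
	dma_unmap_resource(dev, dma, size, DMA_BIDIRECTIONAL, 0);
}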
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index 0032e3bf46ee..d63a6ee905c9 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -5,6 +5,7 @@
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <linux/crypto.h>
#include <linux/dmapool.h>
@@ -373,7 +374,7 @@ struct mv_cesa_engine;
struct mv_cesa_caps {
int nengines;
bool has_tdma;
- struct crypto_alg **cipher_algs;
+ struct skcipher_alg **cipher_algs;
int ncipher_algs;
struct ahash_alg **ahash_algs;
int nahash_algs;
@@ -539,12 +540,12 @@ struct mv_cesa_sg_std_iter {
};
/**
- * struct mv_cesa_ablkcipher_std_req - cipher standard request
+ * struct mv_cesa_skcipher_std_req - cipher standard request
* @op: operation context
* @offset: current operation offset
* @size: size of the crypto operation
*/
-struct mv_cesa_ablkcipher_std_req {
+struct mv_cesa_skcipher_std_req {
struct mv_cesa_op_ctx op;
unsigned int offset;
unsigned int size;
@@ -552,14 +553,14 @@ struct mv_cesa_ablkcipher_std_req {
};
/**
- * struct mv_cesa_ablkcipher_req - cipher request
+ * struct mv_cesa_skcipher_req - cipher request
* @req: type specific request information
* @src_nents: number of entries in the src sg list
* @dst_nents: number of entries in the dest sg list
*/
-struct mv_cesa_ablkcipher_req {
+struct mv_cesa_skcipher_req {
struct mv_cesa_req base;
- struct mv_cesa_ablkcipher_std_req std;
+ struct mv_cesa_skcipher_std_req std;
int src_nents;
int dst_nents;
};
@@ -764,7 +765,7 @@ static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
* the backlog and will be processed later. There's no need to
* clean it up.
*/
- if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ if (ret == -EBUSY)
return false;
/* Request wasn't queued, we need to clean it up */
@@ -869,11 +870,11 @@ extern struct ahash_alg mv_ahmac_md5_alg;
extern struct ahash_alg mv_ahmac_sha1_alg;
extern struct ahash_alg mv_ahmac_sha256_alg;
-extern struct crypto_alg mv_cesa_ecb_des_alg;
-extern struct crypto_alg mv_cesa_cbc_des_alg;
-extern struct crypto_alg mv_cesa_ecb_des3_ede_alg;
-extern struct crypto_alg mv_cesa_cbc_des3_ede_alg;
-extern struct crypto_alg mv_cesa_ecb_aes_alg;
-extern struct crypto_alg mv_cesa_cbc_aes_alg;
+extern struct skcipher_alg mv_cesa_ecb_des_alg;
+extern struct skcipher_alg mv_cesa_cbc_des_alg;
+extern struct skcipher_alg mv_cesa_ecb_des3_ede_alg;
+extern struct skcipher_alg mv_cesa_cbc_des3_ede_alg;
+extern struct skcipher_alg mv_cesa_ecb_aes_alg;
+extern struct skcipher_alg mv_cesa_cbc_aes_alg;
#endif /* __MARVELL_CESA_H__ */
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 098871a22a54..0ae84ec9e21c 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -32,23 +32,23 @@ struct mv_cesa_aes_ctx {
struct crypto_aes_ctx aes;
};
-struct mv_cesa_ablkcipher_dma_iter {
+struct mv_cesa_skcipher_dma_iter {
struct mv_cesa_dma_iter base;
struct mv_cesa_sg_dma_iter src;
struct mv_cesa_sg_dma_iter dst;
};
static inline void
-mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter,
- struct ablkcipher_request *req)
+mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
+ struct skcipher_request *req)
{
- mv_cesa_req_dma_iter_init(&iter->base, req->nbytes);
+ mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}
static inline bool
-mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
+mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
iter->src.op_offset = 0;
iter->dst.op_offset = 0;
@@ -57,9 +57,9 @@ mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
}
static inline void
-mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
+mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
if (req->dst != req->src) {
dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
@@ -73,20 +73,20 @@ mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
mv_cesa_dma_cleanup(&creq->base);
}
-static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req)
+static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
- mv_cesa_ablkcipher_dma_cleanup(req);
+ mv_cesa_skcipher_dma_cleanup(req);
}
-static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
+static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct mv_cesa_skcipher_std_req *sreq = &creq->std;
struct mv_cesa_engine *engine = creq->base.engine;
- size_t len = min_t(size_t, req->nbytes - sreq->offset,
+ size_t len = min_t(size_t, req->cryptlen - sreq->offset,
CESA_SA_SRAM_PAYLOAD_SIZE);
mv_cesa_adjust_op(engine, &sreq->op);
@@ -114,11 +114,11 @@ static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
-static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
- u32 status)
+static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
+ u32 status)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct mv_cesa_skcipher_std_req *sreq = &creq->std;
struct mv_cesa_engine *engine = creq->base.engine;
size_t len;
@@ -127,122 +127,130 @@ static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
sreq->size, sreq->offset);
sreq->offset += len;
- if (sreq->offset < req->nbytes)
+ if (sreq->offset < req->cryptlen)
return -EINPROGRESS;
return 0;
}
-static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
- u32 status)
+static int mv_cesa_skcipher_process(struct crypto_async_request *req,
+ u32 status)
{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+ struct skcipher_request *skreq = skcipher_request_cast(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
struct mv_cesa_req *basereq = &creq->base;
if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
- return mv_cesa_ablkcipher_std_process(ablkreq, status);
+ return mv_cesa_skcipher_std_process(skreq, status);
return mv_cesa_dma_process(basereq, status);
}
-static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
+static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+ struct skcipher_request *skreq = skcipher_request_cast(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_dma_step(&creq->base);
else
- mv_cesa_ablkcipher_std_step(ablkreq);
+ mv_cesa_skcipher_std_step(skreq);
}
static inline void
-mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req)
+mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_req *basereq = &creq->base;
mv_cesa_dma_prepare(basereq, basereq->engine);
}
static inline void
-mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
+mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct mv_cesa_skcipher_std_req *sreq = &creq->std;
sreq->size = 0;
sreq->offset = 0;
}
-static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
- struct mv_cesa_engine *engine)
+static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
+ struct mv_cesa_engine *engine)
{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+ struct skcipher_request *skreq = skcipher_request_cast(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
creq->base.engine = engine;
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
- mv_cesa_ablkcipher_dma_prepare(ablkreq);
+ mv_cesa_skcipher_dma_prepare(skreq);
else
- mv_cesa_ablkcipher_std_prepare(ablkreq);
+ mv_cesa_skcipher_std_prepare(skreq);
}
static inline void
-mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
+mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+ struct skcipher_request *skreq = skcipher_request_cast(req);
- mv_cesa_ablkcipher_cleanup(ablkreq);
+ mv_cesa_skcipher_cleanup(skreq);
}
static void
-mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
+mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+ struct skcipher_request *skreq = skcipher_request_cast(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
struct mv_cesa_engine *engine = creq->base.engine;
unsigned int ivsize;
- atomic_sub(ablkreq->nbytes, &engine->load);
- ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));
+ atomic_sub(skreq->cryptlen, &engine->load);
+ ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
struct mv_cesa_req *basereq;
basereq = &creq->base;
- memcpy(ablkreq->info, basereq->chain.last->op->ctx.blkcipher.iv,
+ memcpy(skreq->iv, basereq->chain.last->op->ctx.blkcipher.iv,
ivsize);
} else {
- memcpy_fromio(ablkreq->info,
+ memcpy_fromio(skreq->iv,
engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
ivsize);
}
}
-static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
- .step = mv_cesa_ablkcipher_step,
- .process = mv_cesa_ablkcipher_process,
- .cleanup = mv_cesa_ablkcipher_req_cleanup,
- .complete = mv_cesa_ablkcipher_complete,
+static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
+ .step = mv_cesa_skcipher_step,
+ .process = mv_cesa_skcipher_process,
+ .cleanup = mv_cesa_skcipher_req_cleanup,
+ .complete = mv_cesa_skcipher_complete,
};
-static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
+static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
- struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ void *ctx = crypto_tfm_ctx(tfm);
+
+ memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
+}
+
+static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
+{
+ struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);
- ctx->base.ops = &mv_cesa_ablkcipher_req_ops;
+ ctx->ops = &mv_cesa_skcipher_req_ops;
- tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req);
+ crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+ sizeof(struct mv_cesa_skcipher_req));
return 0;
}
-static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
int remaining;
int offset;
@@ -251,7 +259,7 @@ static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
ret = crypto_aes_expand_key(&ctx->aes, key, len);
if (ret) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return ret;
}
@@ -264,16 +272,16 @@ static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
u32 tmp[DES_EXPKEY_WORDS];
int ret;
if (len != DES_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -288,14 +296,14 @@ static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher,
+static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int len)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
if (len != DES3_EDE_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -304,14 +312,14 @@ static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
- const struct mv_cesa_op_ctx *op_templ)
+static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
+ const struct mv_cesa_op_ctx *op_templ)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
struct mv_cesa_req *basereq = &creq->base;
- struct mv_cesa_ablkcipher_dma_iter iter;
+ struct mv_cesa_skcipher_dma_iter iter;
bool skip_ctx = false;
int ret;
unsigned int ivsize;
@@ -339,7 +347,7 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
}
mv_cesa_tdma_desc_iter_init(&basereq->chain);
- mv_cesa_ablkcipher_req_iter_init(&iter, req);
+ mv_cesa_skcipher_req_iter_init(&iter, req);
do {
struct mv_cesa_op_ctx *op;
@@ -370,10 +378,10 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
if (ret)
goto err_free_tdma;
- } while (mv_cesa_ablkcipher_req_iter_next_op(&iter));
+ } while (mv_cesa_skcipher_req_iter_next_op(&iter));
/* Add output data for IV */
- ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
+ ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
CESA_SA_DATA_SRAM_OFFSET,
CESA_TDMA_SRC_IN_SRAM, flags);
@@ -399,11 +407,11 @@ err_unmap_src:
}
static inline int
-mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
- const struct mv_cesa_op_ctx *op_templ)
+mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
+ const struct mv_cesa_op_ctx *op_templ)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct mv_cesa_skcipher_std_req *sreq = &creq->std;
struct mv_cesa_req *basereq = &creq->base;
sreq->op = *op_templ;
@@ -414,23 +422,23 @@ mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
return 0;
}
-static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
- struct mv_cesa_op_ctx *tmpl)
+static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
+ struct mv_cesa_op_ctx *tmpl)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- unsigned int blksize = crypto_ablkcipher_blocksize(tfm);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ unsigned int blksize = crypto_skcipher_blocksize(tfm);
int ret;
- if (!IS_ALIGNED(req->nbytes, blksize))
+ if (!IS_ALIGNED(req->cryptlen, blksize))
return -EINVAL;
- creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
+ creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (creq->src_nents < 0) {
dev_err(cesa_dev->dev, "Invalid number of src SG");
return creq->src_nents;
}
- creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
if (creq->dst_nents < 0) {
dev_err(cesa_dev->dev, "Invalid number of dst SG");
return creq->dst_nents;
@@ -440,36 +448,36 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
CESA_SA_DESC_CFG_OP_MSK);
if (cesa_dev->caps->has_tdma)
- ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl);
+ ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
else
- ret = mv_cesa_ablkcipher_std_req_init(req, tmpl);
+ ret = mv_cesa_skcipher_std_req_init(req, tmpl);
return ret;
}
-static int mv_cesa_ablkcipher_queue_req(struct ablkcipher_request *req,
- struct mv_cesa_op_ctx *tmpl)
+static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
+ struct mv_cesa_op_ctx *tmpl)
{
int ret;
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_engine *engine;
- ret = mv_cesa_ablkcipher_req_init(req, tmpl);
+ ret = mv_cesa_skcipher_req_init(req, tmpl);
if (ret)
return ret;
- engine = mv_cesa_select_engine(req->nbytes);
- mv_cesa_ablkcipher_prepare(&req->base, engine);
+ engine = mv_cesa_select_engine(req->cryptlen);
+ mv_cesa_skcipher_prepare(&req->base, engine);
ret = mv_cesa_queue_req(&req->base, &creq->base);
if (mv_cesa_req_needs_cleanup(&req->base, ret))
- mv_cesa_ablkcipher_cleanup(req);
+ mv_cesa_skcipher_cleanup(req);
return ret;
}
-static int mv_cesa_des_op(struct ablkcipher_request *req,
+static int mv_cesa_des_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -479,10 +487,10 @@ static int mv_cesa_des_op(struct ablkcipher_request *req,
memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);
- return mv_cesa_ablkcipher_queue_req(req, tmpl);
+ return mv_cesa_skcipher_queue_req(req, tmpl);
}
-static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -493,7 +501,7 @@ static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
return mv_cesa_des_op(req, &tmpl);
}
-static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -504,41 +512,38 @@ static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req)
return mv_cesa_des_op(req, &tmpl);
}
-struct crypto_alg mv_cesa_ecb_des_alg = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "mv-ecb-des",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cesa_ablkcipher_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = mv_cesa_des_setkey,
- .encrypt = mv_cesa_ecb_des_encrypt,
- .decrypt = mv_cesa_ecb_des_decrypt,
- },
+struct skcipher_alg mv_cesa_ecb_des_alg = {
+ .setkey = mv_cesa_des_setkey,
+ .encrypt = mv_cesa_ecb_des_encrypt,
+ .decrypt = mv_cesa_ecb_des_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "mv-ecb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_skcipher_cra_init,
+ .cra_exit = mv_cesa_skcipher_cra_exit,
},
};
-static int mv_cesa_cbc_des_op(struct ablkcipher_request *req,
+static int mv_cesa_cbc_des_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
CESA_SA_DESC_CFG_CRYPTCM_MSK);
- memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE);
+ memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES_BLOCK_SIZE);
return mv_cesa_des_op(req, tmpl);
}
-static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -547,7 +552,7 @@ static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_des_op(req, &tmpl);
}
-static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -556,31 +561,28 @@ static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_des_op(req, &tmpl);
}
-struct crypto_alg mv_cesa_cbc_des_alg = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "mv-cbc-des",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cesa_ablkcipher_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = mv_cesa_des_setkey,
- .encrypt = mv_cesa_cbc_des_encrypt,
- .decrypt = mv_cesa_cbc_des_decrypt,
- },
+struct skcipher_alg mv_cesa_cbc_des_alg = {
+ .setkey = mv_cesa_des_setkey,
+ .encrypt = mv_cesa_cbc_des_encrypt,
+ .decrypt = mv_cesa_cbc_des_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "mv-cbc-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_skcipher_cra_init,
+ .cra_exit = mv_cesa_skcipher_cra_exit,
},
};
-static int mv_cesa_des3_op(struct ablkcipher_request *req,
+static int mv_cesa_des3_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -590,10 +592,10 @@ static int mv_cesa_des3_op(struct ablkcipher_request *req,
memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);
- return mv_cesa_ablkcipher_queue_req(req, tmpl);
+ return mv_cesa_skcipher_queue_req(req, tmpl);
}
-static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -605,7 +607,7 @@ static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
return mv_cesa_des3_op(req, &tmpl);
}
-static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -617,39 +619,36 @@ static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req)
return mv_cesa_des3_op(req, &tmpl);
}
-struct crypto_alg mv_cesa_ecb_des3_ede_alg = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "mv-ecb-des3-ede",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cesa_ablkcipher_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = mv_cesa_des3_ede_setkey,
- .encrypt = mv_cesa_ecb_des3_ede_encrypt,
- .decrypt = mv_cesa_ecb_des3_ede_decrypt,
- },
+struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
+ .setkey = mv_cesa_des3_ede_setkey,
+ .encrypt = mv_cesa_ecb_des3_ede_encrypt,
+ .decrypt = mv_cesa_ecb_des3_ede_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "mv-ecb-des3-ede",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_skcipher_cra_init,
+ .cra_exit = mv_cesa_skcipher_cra_exit,
},
};
-static int mv_cesa_cbc_des3_op(struct ablkcipher_request *req,
+static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
- memcpy(tmpl->ctx.blkcipher.iv, req->info, DES3_EDE_BLOCK_SIZE);
+ memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);
return mv_cesa_des3_op(req, tmpl);
}
-static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -661,7 +660,7 @@ static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_des3_op(req, &tmpl);
}
-static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -673,31 +672,28 @@ static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_des3_op(req, &tmpl);
}
-struct crypto_alg mv_cesa_cbc_des3_ede_alg = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "mv-cbc-des3-ede",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cesa_ablkcipher_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = mv_cesa_des3_ede_setkey,
- .encrypt = mv_cesa_cbc_des3_ede_encrypt,
- .decrypt = mv_cesa_cbc_des3_ede_decrypt,
- },
+struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
+ .setkey = mv_cesa_des3_ede_setkey,
+ .encrypt = mv_cesa_cbc_des3_ede_encrypt,
+ .decrypt = mv_cesa_cbc_des3_ede_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "mv-cbc-des3-ede",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_skcipher_cra_init,
+ .cra_exit = mv_cesa_skcipher_cra_exit,
},
};
-static int mv_cesa_aes_op(struct ablkcipher_request *req,
+static int mv_cesa_aes_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -724,10 +720,10 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req,
CESA_SA_DESC_CFG_CRYPTM_MSK |
CESA_SA_DESC_CFG_AES_LEN_MSK);
- return mv_cesa_ablkcipher_queue_req(req, tmpl);
+ return mv_cesa_skcipher_queue_req(req, tmpl);
}
-static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -738,7 +734,7 @@ static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
return mv_cesa_aes_op(req, &tmpl);
}
-static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -749,40 +745,37 @@ static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
return mv_cesa_aes_op(req, &tmpl);
}
-struct crypto_alg mv_cesa_ecb_aes_alg = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "mv-ecb-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cesa_ablkcipher_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mv_cesa_aes_setkey,
- .encrypt = mv_cesa_ecb_aes_encrypt,
- .decrypt = mv_cesa_ecb_aes_decrypt,
- },
+struct skcipher_alg mv_cesa_ecb_aes_alg = {
+ .setkey = mv_cesa_aes_setkey,
+ .encrypt = mv_cesa_ecb_aes_encrypt,
+ .decrypt = mv_cesa_ecb_aes_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "mv-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_skcipher_cra_init,
+ .cra_exit = mv_cesa_skcipher_cra_exit,
},
};
-static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req,
+static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
CESA_SA_DESC_CFG_CRYPTCM_MSK);
- memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE);
+ memcpy(tmpl->ctx.blkcipher.iv, req->iv, AES_BLOCK_SIZE);
return mv_cesa_aes_op(req, tmpl);
}
-static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -791,7 +784,7 @@ static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_aes_op(req, &tmpl);
}
-static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -800,26 +793,23 @@ static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_aes_op(req, &tmpl);
}
-struct crypto_alg mv_cesa_cbc_aes_alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "mv-cbc-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cesa_ablkcipher_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = mv_cesa_aes_setkey,
- .encrypt = mv_cesa_cbc_aes_encrypt,
- .decrypt = mv_cesa_cbc_aes_decrypt,
- },
+struct skcipher_alg mv_cesa_cbc_aes_alg = {
+ .setkey = mv_cesa_aes_setkey,
+ .encrypt = mv_cesa_cbc_aes_encrypt,
+ .decrypt = mv_cesa_cbc_aes_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "mv-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_skcipher_cra_init,
+ .cra_exit = mv_cesa_skcipher_cra_exit,
},
};
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index c76375ff376d..d0ef171c18df 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -304,10 +304,7 @@ int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags)
struct mv_cesa_tdma_desc *tdma;
tdma = mv_cesa_dma_add_desc(chain, flags);
- if (IS_ERR(tdma))
- return PTR_ERR(tdma);
-
- return 0;
+ return PTR_ERR_OR_ZERO(tdma);
}
int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags)
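
On the tdma.c hunk above: PTR_ERR_OR_ZERO() from <linux/err.h> folds the IS_ERR()/PTR_ERR() check into one expression. Illustrative only; the function name is hypothetical.

#include <linux/err.h>

static int desc_alloc_status(void *desc)
{
	/* 0 for a valid pointer, the encoded errno for an ERR_PTR() value. */
	return PTR_ERR_OR_ZERO(desc);
}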
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index 9e845e866dec..c2058cf59f57 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -13,6 +13,7 @@
*/
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include "mtk-platform.h"
#define AES_QUEUE_SIZE 512
@@ -137,11 +138,6 @@ struct mtk_aes_gcm_ctx {
struct crypto_skcipher *ctr;
};
-struct mtk_aes_gcm_setkey_result {
- int err;
- struct completion completion;
-};
-
struct mtk_aes_drv {
struct list_head dev_list;
/* Device list lock */
@@ -928,25 +924,19 @@ static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
{
struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
+ /* Empty messages are not supported yet */
+ if (!gctx->textlen && !req->assoclen)
+ return -EINVAL;
+
rctx->mode = AES_FLAGS_GCM | mode;
return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT),
&req->base);
}
-static void mtk_gcm_setkey_done(struct crypto_async_request *req, int err)
-{
- struct mtk_aes_gcm_setkey_result *result = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- result->err = err;
- complete(&result->completion);
-}
-
/*
* Because of the hardware limitation, we need to pre-calculate key(H)
* for the GHASH operation. The result of the encryption operation
@@ -962,7 +952,7 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
u32 hash[4];
u8 iv[8];
- struct mtk_aes_gcm_setkey_result result;
+ struct crypto_wait wait;
struct scatterlist sg[1];
struct skcipher_request req;
@@ -1002,22 +992,17 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
if (!data)
return -ENOMEM;
- init_completion(&data->result.completion);
+ crypto_init_wait(&data->wait);
sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
skcipher_request_set_tfm(&data->req, ctr);
skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
- mtk_gcm_setkey_done, &data->result);
+ crypto_req_done, &data->wait);
skcipher_request_set_crypt(&data->req, data->sg, data->sg,
AES_BLOCK_SIZE, data->iv);
- err = crypto_skcipher_encrypt(&data->req);
- if (err == -EINPROGRESS || err == -EBUSY) {
- err = wait_for_completion_interruptible(
- &data->result.completion);
- if (!err)
- err = data->result.err;
- }
+ err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
+ &data->wait);
if (err)
goto out;
@@ -1098,7 +1083,7 @@ static struct aead_alg aes_gcm_alg = {
.decrypt = mtk_aes_gcm_decrypt,
.init = mtk_aes_gcm_init,
.exit = mtk_aes_gcm_exit,
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
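
Background for the mtk-aes setkey conversion above: the open-coded completion is replaced by the generic crypto_wait helpers. A minimal, hedged sketch of waiting synchronously on an async skcipher request; request setup is elided and the names are placeholders.

#include <crypto/skcipher.h>
#include <linux/crypto.h>

static int encrypt_sync(struct skcipher_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);

	/*
	 * crypto_wait_req() sleeps on -EINPROGRESS/-EBUSY and returns the
	 * request's final status.
	 */
	return crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
}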
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
deleted file mode 100644
index bf25f415eea6..000000000000
--- a/drivers/crypto/mv_cesa.c
+++ /dev/null
@@ -1,1216 +0,0 @@
-/*
- * Support for Marvell's crypto engine which can be found on some Orion5X
- * boards.
- *
- * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
- * License: GPLv2
- *
- */
-#include <crypto/aes.h>
-#include <crypto/algapi.h>
-#include <linux/crypto.h>
-#include <linux/genalloc.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kthread.h>
-#include <linux/platform_device.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <crypto/hmac.h>
-#include <crypto/internal/hash.h>
-#include <crypto/sha.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
-
-#include "mv_cesa.h"
-
-#define MV_CESA "MV-CESA:"
-#define MAX_HW_HASH_SIZE 0xFFFF
-#define MV_CESA_EXPIRE 500 /* msec */
-
-#define MV_CESA_DEFAULT_SRAM_SIZE 2048
-
-/*
- * STM:
- * /---------------------------------------\
- * | | request complete
- * \./ |
- * IDLE -> new request -> BUSY -> done -> DEQUEUE
- * /°\ |
- * | | more scatter entries
- * \________________/
- */
-enum engine_status {
- ENGINE_IDLE,
- ENGINE_BUSY,
- ENGINE_W_DEQUEUE,
-};
-
-/**
- * struct req_progress - used for every crypt request
- * @src_sg_it: sg iterator for src
- * @dst_sg_it: sg iterator for dst
- * @sg_src_left: bytes left in src to process (scatter list)
- * @src_start: offset to add to src start position (scatter list)
- * @crypt_len: length of current hw crypt/hash process
- * @hw_nbytes: total bytes to process in hw for this request
- * @copy_back: whether to copy data back (crypt) or not (hash)
- * @sg_dst_left: bytes left dst to process in this scatter list
- * @dst_start: offset to add to dst start position (scatter list)
- * @hw_processed_bytes: number of bytes processed by hw (request).
- *
- * sg helper are used to iterate over the scatterlist. Since the size of the
- * SRAM may be less than the scatter size, this struct struct is used to keep
- * track of progress within current scatterlist.
- */
-struct req_progress {
- struct sg_mapping_iter src_sg_it;
- struct sg_mapping_iter dst_sg_it;
- void (*complete) (void);
- void (*process) (int is_first);
-
- /* src mostly */
- int sg_src_left;
- int src_start;
- int crypt_len;
- int hw_nbytes;
- /* dst mostly */
- int copy_back;
- int sg_dst_left;
- int dst_start;
- int hw_processed_bytes;
-};
-
-struct crypto_priv {
- void __iomem *reg;
- void __iomem *sram;
- struct gen_pool *sram_pool;
- dma_addr_t sram_dma;
- int irq;
- struct clk *clk;
- struct task_struct *queue_th;
-
- /* the lock protects queue and eng_st */
- spinlock_t lock;
- struct crypto_queue queue;
- enum engine_status eng_st;
- struct timer_list completion_timer;
- struct crypto_async_request *cur_req;
- struct req_progress p;
- int max_req_size;
- int sram_size;
- int has_sha1;
- int has_hmac_sha1;
-};
-
-static struct crypto_priv *cpg;
-
-struct mv_ctx {
- u8 aes_enc_key[AES_KEY_LEN];
- u32 aes_dec_key[8];
- int key_len;
- u32 need_calc_aes_dkey;
-};
-
-enum crypto_op {
- COP_AES_ECB,
- COP_AES_CBC,
-};
-
-struct mv_req_ctx {
- enum crypto_op op;
- int decrypt;
-};
-
-enum hash_op {
- COP_SHA1,
- COP_HMAC_SHA1
-};
-
-struct mv_tfm_hash_ctx {
- struct crypto_shash *fallback;
- struct crypto_shash *base_hash;
- u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
- int count_add;
- enum hash_op op;
-};
-
-struct mv_req_hash_ctx {
- u64 count;
- u32 state[SHA1_DIGEST_SIZE / 4];
- u8 buffer[SHA1_BLOCK_SIZE];
- int first_hash; /* marks that we don't have previous state */
- int last_chunk; /* marks that this is the 'final' request */
- int extra_bytes; /* unprocessed bytes in buffer */
- enum hash_op op;
- int count_add;
-};
-
-static void mv_completion_timer_callback(unsigned long unused)
-{
- int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0;
-
- printk(KERN_ERR MV_CESA
- "completion timer expired (CESA %sactive), cleaning up.\n",
- active ? "" : "in");
-
- del_timer(&cpg->completion_timer);
- writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD);
- while(readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC)
- printk(KERN_INFO MV_CESA "%s: waiting for engine finishing\n", __func__);
- cpg->eng_st = ENGINE_W_DEQUEUE;
- wake_up_process(cpg->queue_th);
-}
-
-static void mv_setup_timer(void)
-{
- setup_timer(&cpg->completion_timer, &mv_completion_timer_callback, 0);
- mod_timer(&cpg->completion_timer,
- jiffies + msecs_to_jiffies(MV_CESA_EXPIRE));
-}
-
-static void compute_aes_dec_key(struct mv_ctx *ctx)
-{
- struct crypto_aes_ctx gen_aes_key;
- int key_pos;
-
- if (!ctx->need_calc_aes_dkey)
- return;
-
- crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);
-
- key_pos = ctx->key_len + 24;
- memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
- switch (ctx->key_len) {
- case AES_KEYSIZE_256:
- key_pos -= 2;
- /* fall */
- case AES_KEYSIZE_192:
- key_pos -= 2;
- memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
- 4 * 4);
- break;
- }
- ctx->need_calc_aes_dkey = 0;
-}
-
-static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
- unsigned int len)
-{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct mv_ctx *ctx = crypto_tfm_ctx(tfm);
-
- switch (len) {
- case AES_KEYSIZE_128:
- case AES_KEYSIZE_192:
- case AES_KEYSIZE_256:
- break;
- default:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
- ctx->key_len = len;
- ctx->need_calc_aes_dkey = 1;
-
- memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
- return 0;
-}
-
-static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
-{
- int ret;
- void *sbuf;
- int copy_len;
-
- while (len) {
- if (!p->sg_src_left) {
- ret = sg_miter_next(&p->src_sg_it);
- BUG_ON(!ret);
- p->sg_src_left = p->src_sg_it.length;
- p->src_start = 0;
- }
-
- sbuf = p->src_sg_it.addr + p->src_start;
-
- copy_len = min(p->sg_src_left, len);
- memcpy(dbuf, sbuf, copy_len);
-
- p->src_start += copy_len;
- p->sg_src_left -= copy_len;
-
- len -= copy_len;
- dbuf += copy_len;
- }
-}
-
-static void setup_data_in(void)
-{
- struct req_progress *p = &cpg->p;
- int data_in_sram =
- min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
- copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
- data_in_sram - p->crypt_len);
- p->crypt_len = data_in_sram;
-}
-
-static void mv_process_current_q(int first_block)
-{
- struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
- struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
- struct sec_accel_config op;
-
- switch (req_ctx->op) {
- case COP_AES_ECB:
- op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
- break;
- case COP_AES_CBC:
- default:
- op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
- op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
- ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
- if (first_block)
- memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
- break;
- }
- if (req_ctx->decrypt) {
- op.config |= CFG_DIR_DEC;
- memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
- AES_KEY_LEN);
- } else {
- op.config |= CFG_DIR_ENC;
- memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
- AES_KEY_LEN);
- }
-
- switch (ctx->key_len) {
- case AES_KEYSIZE_128:
- op.config |= CFG_AES_LEN_128;
- break;
- case AES_KEYSIZE_192:
- op.config |= CFG_AES_LEN_192;
- break;
- case AES_KEYSIZE_256:
- op.config |= CFG_AES_LEN_256;
- break;
- }
- op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
- ENC_P_DST(SRAM_DATA_OUT_START);
- op.enc_key_p = SRAM_DATA_KEY_P;
-
- setup_data_in();
- op.enc_len = cpg->p.crypt_len;
- memcpy(cpg->sram + SRAM_CONFIG, &op,
- sizeof(struct sec_accel_config));
-
- /* GO */
- mv_setup_timer();
- writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
-}
-
-static void mv_crypto_algo_completion(void)
-{
- struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
- struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
- sg_miter_stop(&cpg->p.src_sg_it);
- sg_miter_stop(&cpg->p.dst_sg_it);
-
- if (req_ctx->op != COP_AES_CBC)
- return ;
-
- memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
-}
-
-static void mv_process_hash_current(int first_block)
-{
- struct ahash_request *req = ahash_request_cast(cpg->cur_req);
- const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
- struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
- struct req_progress *p = &cpg->p;
- struct sec_accel_config op = { 0 };
- int is_last;
-
- switch (req_ctx->op) {
- case COP_SHA1:
- default:
- op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
- break;
- case COP_HMAC_SHA1:
- op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
- memcpy(cpg->sram + SRAM_HMAC_IV_IN,
- tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
- break;
- }
-
- op.mac_src_p =
- MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32)
- req_ctx->
- count);
-
- setup_data_in();
-
- op.mac_digest =
- MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
- op.mac_iv =
- MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
- MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);
-
- is_last = req_ctx->last_chunk
- && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
- && (req_ctx->count <= MAX_HW_HASH_SIZE);
- if (req_ctx->first_hash) {
- if (is_last)
- op.config |= CFG_NOT_FRAG;
- else
- op.config |= CFG_FIRST_FRAG;
-
- req_ctx->first_hash = 0;
- } else {
- if (is_last)
- op.config |= CFG_LAST_FRAG;
- else
- op.config |= CFG_MID_FRAG;
-
- if (first_block) {
- writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
- writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
- writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
- writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
- writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
- }
- }
-
- memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
-
- /* GO */
- mv_setup_timer();
- writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
-}
-
-static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
- struct shash_desc *desc)
-{
- int i;
- struct sha1_state shash_state;
-
- shash_state.count = ctx->count + ctx->count_add;
- for (i = 0; i < 5; i++)
- shash_state.state[i] = ctx->state[i];
- memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
- return crypto_shash_import(desc, &shash_state);
-}
-
-static int mv_hash_final_fallback(struct ahash_request *req)
-{
- const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
- struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
- SHASH_DESC_ON_STACK(shash, tfm_ctx->fallback);
- int rc;
-
- shash->tfm = tfm_ctx->fallback;
- shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- if (unlikely(req_ctx->first_hash)) {
- crypto_shash_init(shash);
- crypto_shash_update(shash, req_ctx->buffer,
- req_ctx->extra_bytes);
- } else {
- /* only SHA1 for now....
- */
- rc = mv_hash_import_sha1_ctx(req_ctx, shash);
- if (rc)
- goto out;
- }
- rc = crypto_shash_final(shash, req->result);
-out:
- return rc;
-}
-
-static void mv_save_digest_state(struct mv_req_hash_ctx *ctx)
-{
- ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
- ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
- ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
- ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
- ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
-}
-
-static void mv_hash_algo_completion(void)
-{
- struct ahash_request *req = ahash_request_cast(cpg->cur_req);
- struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-
- if (ctx->extra_bytes)
- copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
- sg_miter_stop(&cpg->p.src_sg_it);
-
- if (likely(ctx->last_chunk)) {
- if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
- memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
- crypto_ahash_digestsize(crypto_ahash_reqtfm
- (req)));
- } else {
- mv_save_digest_state(ctx);
- mv_hash_final_fallback(req);
- }
- } else {
- mv_save_digest_state(ctx);
- }
-}
-
-static void dequeue_complete_req(void)
-{
- struct crypto_async_request *req = cpg->cur_req;
- void *buf;
- int ret;
- cpg->p.hw_processed_bytes += cpg->p.crypt_len;
- if (cpg->p.copy_back) {
- int need_copy_len = cpg->p.crypt_len;
- int sram_offset = 0;
- do {
- int dst_copy;
-
- if (!cpg->p.sg_dst_left) {
- ret = sg_miter_next(&cpg->p.dst_sg_it);
- BUG_ON(!ret);
- cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
- cpg->p.dst_start = 0;
- }
-
- buf = cpg->p.dst_sg_it.addr;
- buf += cpg->p.dst_start;
-
- dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
-
- memcpy(buf,
- cpg->sram + SRAM_DATA_OUT_START + sram_offset,
- dst_copy);
- sram_offset += dst_copy;
- cpg->p.sg_dst_left -= dst_copy;
- need_copy_len -= dst_copy;
- cpg->p.dst_start += dst_copy;
- } while (need_copy_len > 0);
- }
-
- cpg->p.crypt_len = 0;
-
- BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
- if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
- /* process next scatter list entry */
- cpg->eng_st = ENGINE_BUSY;
- cpg->p.process(0);
- } else {
- cpg->p.complete();
- cpg->eng_st = ENGINE_IDLE;
- local_bh_disable();
- req->complete(req, 0);
- local_bh_enable();
- }
-}
-
-static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
-{
- int i = 0;
- size_t cur_len;
-
- while (sl) {
- cur_len = sl[i].length;
- ++i;
- if (total_bytes > cur_len)
- total_bytes -= cur_len;
- else
- break;
- }
-
- return i;
-}
-
-static void mv_start_new_crypt_req(struct ablkcipher_request *req)
-{
- struct req_progress *p = &cpg->p;
- int num_sgs;
-
- cpg->cur_req = &req->base;
- memset(p, 0, sizeof(struct req_progress));
- p->hw_nbytes = req->nbytes;
- p->complete = mv_crypto_algo_completion;
- p->process = mv_process_current_q;
- p->copy_back = 1;
-
- num_sgs = count_sgs(req->src, req->nbytes);
- sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
-
- num_sgs = count_sgs(req->dst, req->nbytes);
- sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
-
- mv_process_current_q(1);
-}
-
-static void mv_start_new_hash_req(struct ahash_request *req)
-{
- struct req_progress *p = &cpg->p;
- struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
- int num_sgs, hw_bytes, old_extra_bytes, rc;
- cpg->cur_req = &req->base;
- memset(p, 0, sizeof(struct req_progress));
- hw_bytes = req->nbytes + ctx->extra_bytes;
- old_extra_bytes = ctx->extra_bytes;
-
- ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
- if (ctx->extra_bytes != 0
- && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
- hw_bytes -= ctx->extra_bytes;
- else
- ctx->extra_bytes = 0;
-
- num_sgs = count_sgs(req->src, req->nbytes);
- sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
-
- if (hw_bytes) {
- p->hw_nbytes = hw_bytes;
- p->complete = mv_hash_algo_completion;
- p->process = mv_process_hash_current;
-
- if (unlikely(old_extra_bytes)) {
- memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
- old_extra_bytes);
- p->crypt_len = old_extra_bytes;
- }
-
- mv_process_hash_current(1);
- } else {
- copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
- ctx->extra_bytes - old_extra_bytes);
- sg_miter_stop(&p->src_sg_it);
- if (ctx->last_chunk)
- rc = mv_hash_final_fallback(req);
- else
- rc = 0;
- cpg->eng_st = ENGINE_IDLE;
- local_bh_disable();
- req->base.complete(&req->base, rc);
- local_bh_enable();
- }
-}
-
-static int queue_manag(void *data)
-{
- cpg->eng_st = ENGINE_IDLE;
- do {
- struct crypto_async_request *async_req = NULL;
- struct crypto_async_request *backlog = NULL;
-
- __set_current_state(TASK_INTERRUPTIBLE);
-
- if (cpg->eng_st == ENGINE_W_DEQUEUE)
- dequeue_complete_req();
-
- spin_lock_irq(&cpg->lock);
- if (cpg->eng_st == ENGINE_IDLE) {
- backlog = crypto_get_backlog(&cpg->queue);
- async_req = crypto_dequeue_request(&cpg->queue);
- if (async_req) {
- BUG_ON(cpg->eng_st != ENGINE_IDLE);
- cpg->eng_st = ENGINE_BUSY;
- }
- }
- spin_unlock_irq(&cpg->lock);
-
- if (backlog) {
- backlog->complete(backlog, -EINPROGRESS);
- backlog = NULL;
- }
-
- if (async_req) {
- if (crypto_tfm_alg_type(async_req->tfm) !=
- CRYPTO_ALG_TYPE_AHASH) {
- struct ablkcipher_request *req =
- ablkcipher_request_cast(async_req);
- mv_start_new_crypt_req(req);
- } else {
- struct ahash_request *req =
- ahash_request_cast(async_req);
- mv_start_new_hash_req(req);
- }
- async_req = NULL;
- }
-
- schedule();
-
- } while (!kthread_should_stop());
- return 0;
-}
-
-static int mv_handle_req(struct crypto_async_request *req)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&cpg->lock, flags);
- ret = crypto_enqueue_request(&cpg->queue, req);
- spin_unlock_irqrestore(&cpg->lock, flags);
- wake_up_process(cpg->queue_th);
- return ret;
-}
-
-static int mv_enc_aes_ecb(struct ablkcipher_request *req)
-{
- struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
- req_ctx->op = COP_AES_ECB;
- req_ctx->decrypt = 0;
-
- return mv_handle_req(&req->base);
-}
-
-static int mv_dec_aes_ecb(struct ablkcipher_request *req)
-{
- struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
- req_ctx->op = COP_AES_ECB;
- req_ctx->decrypt = 1;
-
- compute_aes_dec_key(ctx);
- return mv_handle_req(&req->base);
-}
-
-static int mv_enc_aes_cbc(struct ablkcipher_request *req)
-{
- struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
- req_ctx->op = COP_AES_CBC;
- req_ctx->decrypt = 0;
-
- return mv_handle_req(&req->base);
-}
-
-static int mv_dec_aes_cbc(struct ablkcipher_request *req)
-{
- struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
- req_ctx->op = COP_AES_CBC;
- req_ctx->decrypt = 1;
-
- compute_aes_dec_key(ctx);
- return mv_handle_req(&req->base);
-}
-
-static int mv_cra_init(struct crypto_tfm *tfm)
-{
- tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
- return 0;
-}
-
-static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
- int is_last, unsigned int req_len,
- int count_add)
-{
- memset(ctx, 0, sizeof(*ctx));
- ctx->op = op;
- ctx->count = req_len;
- ctx->first_hash = 1;
- ctx->last_chunk = is_last;
- ctx->count_add = count_add;
-}
-
-static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
- unsigned req_len)
-{
- ctx->last_chunk = is_last;
- ctx->count += req_len;
-}
-
-static int mv_hash_init(struct ahash_request *req)
-{
- const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
- mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
- tfm_ctx->count_add);
- return 0;
-}
-
-static int mv_hash_update(struct ahash_request *req)
-{
- if (!req->nbytes)
- return 0;
-
- mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
- return mv_handle_req(&req->base);
-}
-
-static int mv_hash_final(struct ahash_request *req)
-{
- struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-
- ahash_request_set_crypt(req, NULL, req->result, 0);
- mv_update_hash_req_ctx(ctx, 1, 0);
- return mv_handle_req(&req->base);
-}
-
-static int mv_hash_finup(struct ahash_request *req)
-{
- mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
- return mv_handle_req(&req->base);
-}
-
-static int mv_hash_digest(struct ahash_request *req)
-{
- const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
- mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
- req->nbytes, tfm_ctx->count_add);
- return mv_handle_req(&req->base);
-}
-
-static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
- const void *ostate)
-{
- const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
- int i;
- for (i = 0; i < 5; i++) {
- ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
- ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
- }
-}
-
-static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
- unsigned int keylen)
-{
- int rc;
- struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
- int bs, ds, ss;
-
- if (!ctx->base_hash)
- return 0;
-
- rc = crypto_shash_setkey(ctx->fallback, key, keylen);
- if (rc)
- return rc;
-
- /* Can't see a way to extract the ipad/opad from the fallback tfm
- so I'm basically copying code from the hmac module */
- bs = crypto_shash_blocksize(ctx->base_hash);
- ds = crypto_shash_digestsize(ctx->base_hash);
- ss = crypto_shash_statesize(ctx->base_hash);
-
- {
- SHASH_DESC_ON_STACK(shash, ctx->base_hash);
-
- unsigned int i;
- char ipad[ss];
- char opad[ss];
-
- shash->tfm = ctx->base_hash;
- shash->flags = crypto_shash_get_flags(ctx->base_hash) &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- if (keylen > bs) {
- int err;
-
- err =
- crypto_shash_digest(shash, key, keylen, ipad);
- if (err)
- return err;
-
- keylen = ds;
- } else
- memcpy(ipad, key, keylen);
-
- memset(ipad + keylen, 0, bs - keylen);
- memcpy(opad, ipad, bs);
-
- for (i = 0; i < bs; i++) {
- ipad[i] ^= HMAC_IPAD_VALUE;
- opad[i] ^= HMAC_OPAD_VALUE;
- }
-
- rc = crypto_shash_init(shash) ? :
- crypto_shash_update(shash, ipad, bs) ? :
- crypto_shash_export(shash, ipad) ? :
- crypto_shash_init(shash) ? :
- crypto_shash_update(shash, opad, bs) ? :
- crypto_shash_export(shash, opad);
-
- if (rc == 0)
- mv_hash_init_ivs(ctx, ipad, opad);
-
- return rc;
- }
-}
-
-static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
- enum hash_op op, int count_add)
-{
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_shash *fallback_tfm = NULL;
- struct crypto_shash *base_hash = NULL;
- int err = -ENOMEM;
-
- ctx->op = op;
- ctx->count_add = count_add;
-
- /* Allocate a fallback and abort if it failed. */
- fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(fallback_tfm)) {
- printk(KERN_WARNING MV_CESA
- "Fallback driver '%s' could not be loaded!\n",
- fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
- }
- ctx->fallback = fallback_tfm;
-
- if (base_hash_name) {
- /* Allocate a hash to compute the ipad/opad of hmac. */
- base_hash = crypto_alloc_shash(base_hash_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(base_hash)) {
- printk(KERN_WARNING MV_CESA
- "Base driver '%s' could not be loaded!\n",
- base_hash_name);
- err = PTR_ERR(base_hash);
- goto err_bad_base;
- }
- }
- ctx->base_hash = base_hash;
-
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct mv_req_hash_ctx) +
- crypto_shash_descsize(ctx->fallback));
- return 0;
-err_bad_base:
- crypto_free_shash(fallback_tfm);
-out:
- return err;
-}
-
-static void mv_cra_hash_exit(struct crypto_tfm *tfm)
-{
- struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_shash(ctx->fallback);
- if (ctx->base_hash)
- crypto_free_shash(ctx->base_hash);
-}
-
-static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
-{
- return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
-}
-
-static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
-{
- return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
-}
-
-static irqreturn_t crypto_int(int irq, void *priv)
-{
- u32 val;
-
- val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
- if (!(val & SEC_INT_ACCEL0_DONE))
- return IRQ_NONE;
-
- if (!del_timer(&cpg->completion_timer)) {
- printk(KERN_WARNING MV_CESA
- "got an interrupt but no pending timer?\n");
- }
- val &= ~SEC_INT_ACCEL0_DONE;
- writel(val, cpg->reg + FPGA_INT_STATUS);
- writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
- BUG_ON(cpg->eng_st != ENGINE_BUSY);
- cpg->eng_st = ENGINE_W_DEQUEUE;
- wake_up_process(cpg->queue_th);
- return IRQ_HANDLED;
-}
-
-static struct crypto_alg mv_aes_alg_ecb = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "mv-ecb-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 16,
- .cra_ctxsize = sizeof(struct mv_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mv_setkey_aes,
- .encrypt = mv_enc_aes_ecb,
- .decrypt = mv_dec_aes_ecb,
- },
- },
-};
-
-static struct crypto_alg mv_aes_alg_cbc = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "mv-cbc-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cra_init,
- .cra_u = {
- .ablkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mv_setkey_aes,
- .encrypt = mv_enc_aes_cbc,
- .decrypt = mv_dec_aes_cbc,
- },
- },
-};
-
-static struct ahash_alg mv_sha1_alg = {
- .init = mv_hash_init,
- .update = mv_hash_update,
- .final = mv_hash_final,
- .finup = mv_hash_finup,
- .digest = mv_hash_digest,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .base = {
- .cra_name = "sha1",
- .cra_driver_name = "mv-sha1",
- .cra_priority = 300,
- .cra_flags =
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
- .cra_init = mv_cra_hash_sha1_init,
- .cra_exit = mv_cra_hash_exit,
- .cra_module = THIS_MODULE,
- }
- }
-};
-
-static struct ahash_alg mv_hmac_sha1_alg = {
- .init = mv_hash_init,
- .update = mv_hash_update,
- .final = mv_hash_final,
- .finup = mv_hash_finup,
- .digest = mv_hash_digest,
- .setkey = mv_hash_setkey,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .base = {
- .cra_name = "hmac(sha1)",
- .cra_driver_name = "mv-hmac-sha1",
- .cra_priority = 300,
- .cra_flags =
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
- .cra_init = mv_cra_hash_hmac_sha1_init,
- .cra_exit = mv_cra_hash_exit,
- .cra_module = THIS_MODULE,
- }
- }
-};
-
-static int mv_cesa_get_sram(struct platform_device *pdev,
- struct crypto_priv *cp)
-{
- struct resource *res;
- u32 sram_size = MV_CESA_DEFAULT_SRAM_SIZE;
-
- of_property_read_u32(pdev->dev.of_node, "marvell,crypto-sram-size",
- &sram_size);
-
- cp->sram_size = sram_size;
- cp->sram_pool = of_gen_pool_get(pdev->dev.of_node,
- "marvell,crypto-srams", 0);
- if (cp->sram_pool) {
- cp->sram = gen_pool_dma_alloc(cp->sram_pool, sram_size,
- &cp->sram_dma);
- if (cp->sram)
- return 0;
-
- return -ENOMEM;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "sram");
- if (!res || resource_size(res) < cp->sram_size)
- return -EINVAL;
-
- cp->sram = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(cp->sram))
- return PTR_ERR(cp->sram);
-
- return 0;
-}
-
-static int mv_probe(struct platform_device *pdev)
-{
- struct crypto_priv *cp;
- struct resource *res;
- int irq;
- int ret;
-
- if (cpg) {
- printk(KERN_ERR MV_CESA "Second crypto dev?\n");
- return -EEXIST;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- if (!res)
- return -ENXIO;
-
- cp = devm_kzalloc(&pdev->dev, sizeof(*cp), GFP_KERNEL);
- if (!cp)
- return -ENOMEM;
-
- spin_lock_init(&cp->lock);
- crypto_init_queue(&cp->queue, 50);
- cp->reg = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(cp->reg)) {
- ret = PTR_ERR(cp->reg);
- goto err;
- }
-
- ret = mv_cesa_get_sram(pdev, cp);
- if (ret)
- goto err;
-
- cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err;
- }
- cp->irq = irq;
-
- platform_set_drvdata(pdev, cp);
- cpg = cp;
-
- cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
- if (IS_ERR(cp->queue_th)) {
- ret = PTR_ERR(cp->queue_th);
- goto err;
- }
-
- ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev),
- cp);
- if (ret)
- goto err_thread;
-
- /* Not all platforms can gate the clock, so it is not
- an error if the clock does not exists. */
- cp->clk = clk_get(&pdev->dev, NULL);
- if (!IS_ERR(cp->clk))
- clk_prepare_enable(cp->clk);
-
- writel(0, cpg->reg + SEC_ACCEL_INT_STATUS);
- writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
- writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
- writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
-
- ret = crypto_register_alg(&mv_aes_alg_ecb);
- if (ret) {
- printk(KERN_WARNING MV_CESA
- "Could not register aes-ecb driver\n");
- goto err_irq;
- }
-
- ret = crypto_register_alg(&mv_aes_alg_cbc);
- if (ret) {
- printk(KERN_WARNING MV_CESA
- "Could not register aes-cbc driver\n");
- goto err_unreg_ecb;
- }
-
- ret = crypto_register_ahash(&mv_sha1_alg);
- if (ret == 0)
- cpg->has_sha1 = 1;
- else
- printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");
-
- ret = crypto_register_ahash(&mv_hmac_sha1_alg);
- if (ret == 0) {
- cpg->has_hmac_sha1 = 1;
- } else {
- printk(KERN_WARNING MV_CESA
- "Could not register hmac-sha1 driver\n");
- }
-
- return 0;
-err_unreg_ecb:
- crypto_unregister_alg(&mv_aes_alg_ecb);
-err_irq:
- free_irq(irq, cp);
- if (!IS_ERR(cp->clk)) {
- clk_disable_unprepare(cp->clk);
- clk_put(cp->clk);
- }
-err_thread:
- kthread_stop(cp->queue_th);
-err:
- cpg = NULL;
- return ret;
-}
-
-static int mv_remove(struct platform_device *pdev)
-{
- struct crypto_priv *cp = platform_get_drvdata(pdev);
-
- crypto_unregister_alg(&mv_aes_alg_ecb);
- crypto_unregister_alg(&mv_aes_alg_cbc);
- if (cp->has_sha1)
- crypto_unregister_ahash(&mv_sha1_alg);
- if (cp->has_hmac_sha1)
- crypto_unregister_ahash(&mv_hmac_sha1_alg);
- kthread_stop(cp->queue_th);
- free_irq(cp->irq, cp);
- memset(cp->sram, 0, cp->sram_size);
-
- if (!IS_ERR(cp->clk)) {
- clk_disable_unprepare(cp->clk);
- clk_put(cp->clk);
- }
-
- cpg = NULL;
- return 0;
-}
-
-static const struct of_device_id mv_cesa_of_match_table[] = {
- { .compatible = "marvell,orion-crypto", },
- { .compatible = "marvell,kirkwood-crypto", },
- { .compatible = "marvell,dove-crypto", },
- {}
-};
-MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
-
-static struct platform_driver marvell_crypto = {
- .probe = mv_probe,
- .remove = mv_remove,
- .driver = {
- .name = "mv_crypto",
- .of_match_table = mv_cesa_of_match_table,
- },
-};
-MODULE_ALIAS("platform:mv_crypto");
-
-module_platform_driver(marvell_crypto);
-
-MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
-MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
-MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
deleted file mode 100644
index 0be3f0aa4afd..000000000000
--- a/drivers/crypto/mv_cesa.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __MV_CRYPTO_H__
-#define __MV_CRYPTO_H__
-
-#define DIGEST_INITIAL_VAL_A 0xdd00
-#define DIGEST_INITIAL_VAL_B 0xdd04
-#define DIGEST_INITIAL_VAL_C 0xdd08
-#define DIGEST_INITIAL_VAL_D 0xdd0c
-#define DIGEST_INITIAL_VAL_E 0xdd10
-#define DES_CMD_REG 0xdd58
-
-#define SEC_ACCEL_CMD 0xde00
-#define SEC_CMD_EN_SEC_ACCL0 (1 << 0)
-#define SEC_CMD_EN_SEC_ACCL1 (1 << 1)
-#define SEC_CMD_DISABLE_SEC (1 << 2)
-
-#define SEC_ACCEL_DESC_P0 0xde04
-#define SEC_DESC_P0_PTR(x) (x)
-
-#define SEC_ACCEL_DESC_P1 0xde14
-#define SEC_DESC_P1_PTR(x) (x)
-
-#define SEC_ACCEL_CFG 0xde08
-#define SEC_CFG_STOP_DIG_ERR (1 << 0)
-#define SEC_CFG_CH0_W_IDMA (1 << 7)
-#define SEC_CFG_CH1_W_IDMA (1 << 8)
-#define SEC_CFG_ACT_CH0_IDMA (1 << 9)
-#define SEC_CFG_ACT_CH1_IDMA (1 << 10)
-
-#define SEC_ACCEL_STATUS 0xde0c
-#define SEC_ST_ACT_0 (1 << 0)
-#define SEC_ST_ACT_1 (1 << 1)
-
-/*
- * FPGA_INT_STATUS looks like a FPGA leftover and is documented only in Errata
- * 4.12. It looks like that it was part of an IRQ-controller in FPGA and
- * someone forgot to remove it while switching to the core and moving to
- * SEC_ACCEL_INT_STATUS.
- */
-#define FPGA_INT_STATUS 0xdd68
-#define SEC_ACCEL_INT_STATUS 0xde20
-#define SEC_INT_AUTH_DONE (1 << 0)
-#define SEC_INT_DES_E_DONE (1 << 1)
-#define SEC_INT_AES_E_DONE (1 << 2)
-#define SEC_INT_AES_D_DONE (1 << 3)
-#define SEC_INT_ENC_DONE (1 << 4)
-#define SEC_INT_ACCEL0_DONE (1 << 5)
-#define SEC_INT_ACCEL1_DONE (1 << 6)
-#define SEC_INT_ACC0_IDMA_DONE (1 << 7)
-#define SEC_INT_ACC1_IDMA_DONE (1 << 8)
-
-#define SEC_ACCEL_INT_MASK 0xde24
-
-#define AES_KEY_LEN (8 * 4)
-
-struct sec_accel_config {
-
- u32 config;
-#define CFG_OP_MAC_ONLY 0
-#define CFG_OP_CRYPT_ONLY 1
-#define CFG_OP_MAC_CRYPT 2
-#define CFG_OP_CRYPT_MAC 3
-#define CFG_MACM_MD5 (4 << 4)
-#define CFG_MACM_SHA1 (5 << 4)
-#define CFG_MACM_HMAC_MD5 (6 << 4)
-#define CFG_MACM_HMAC_SHA1 (7 << 4)
-#define CFG_ENCM_DES (1 << 8)
-#define CFG_ENCM_3DES (2 << 8)
-#define CFG_ENCM_AES (3 << 8)
-#define CFG_DIR_ENC (0 << 12)
-#define CFG_DIR_DEC (1 << 12)
-#define CFG_ENC_MODE_ECB (0 << 16)
-#define CFG_ENC_MODE_CBC (1 << 16)
-#define CFG_3DES_EEE (0 << 20)
-#define CFG_3DES_EDE (1 << 20)
-#define CFG_AES_LEN_128 (0 << 24)
-#define CFG_AES_LEN_192 (1 << 24)
-#define CFG_AES_LEN_256 (2 << 24)
-#define CFG_NOT_FRAG (0 << 30)
-#define CFG_FIRST_FRAG (1 << 30)
-#define CFG_LAST_FRAG (2 << 30)
-#define CFG_MID_FRAG (3 << 30)
-
- u32 enc_p;
-#define ENC_P_SRC(x) (x)
-#define ENC_P_DST(x) ((x) << 16)
-
- u32 enc_len;
-#define ENC_LEN(x) (x)
-
- u32 enc_key_p;
-#define ENC_KEY_P(x) (x)
-
- u32 enc_iv;
-#define ENC_IV_POINT(x) ((x) << 0)
-#define ENC_IV_BUF_POINT(x) ((x) << 16)
-
- u32 mac_src_p;
-#define MAC_SRC_DATA_P(x) (x)
-#define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
-
- u32 mac_digest;
-#define MAC_DIGEST_P(x) (x)
-#define MAC_FRAG_LEN(x) ((x) << 16)
- u32 mac_iv;
-#define MAC_INNER_IV_P(x) (x)
-#define MAC_OUTER_IV_P(x) ((x) << 16)
-}__attribute__ ((packed));
- /*
- * /-----------\ 0
- * | ACCEL CFG | 4 * 8
- * |-----------| 0x20
- * | CRYPT KEY | 8 * 4
- * |-----------| 0x40
- * | IV IN | 4 * 4
- * |-----------| 0x40 (inplace)
- * | IV BUF | 4 * 4
- * |-----------| 0x80
- * | DATA IN | 16 * x (max ->max_req_size)
- * |-----------| 0x80 (inplace operation)
- * | DATA OUT | 16 * x (max ->max_req_size)
- * \-----------/ SRAM size
- */
-
- /* Hashing memory map:
- * /-----------\ 0
- * | ACCEL CFG | 4 * 8
- * |-----------| 0x20
- * | Inner IV | 5 * 4
- * |-----------| 0x34
- * | Outer IV | 5 * 4
- * |-----------| 0x48
- * | Output BUF| 5 * 4
- * |-----------| 0x80
- * | DATA IN | 64 * x (max ->max_req_size)
- * \-----------/ SRAM size
- */
-#define SRAM_CONFIG 0x00
-#define SRAM_DATA_KEY_P 0x20
-#define SRAM_DATA_IV 0x40
-#define SRAM_DATA_IV_BUF 0x40
-#define SRAM_DATA_IN_START 0x80
-#define SRAM_DATA_OUT_START 0x80
-
-#define SRAM_HMAC_IV_IN 0x20
-#define SRAM_HMAC_IV_OUT 0x34
-#define SRAM_DIGEST_BUF 0x48
-
-#define SRAM_CFG_SPACE 0x80
-
-#endif
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index a9fd8b9e86cd..662e709812cc 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1625,6 +1625,7 @@ static int queue_cache_init(void)
CWQ_ENTRY_SIZE, 0, NULL);
if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+ queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
return -ENOMEM;
}
return 0;
@@ -1634,6 +1635,8 @@ static void queue_cache_destroy(void)
{
kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
+ queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
+ queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
}
static long spu_queue_register_workfn(void *arg)
@@ -1962,10 +1965,8 @@ static struct n2_crypto *alloc_n2cp(void)
static void free_n2cp(struct n2_crypto *np)
{
- if (np->cwq_info.ino_table) {
- kfree(np->cwq_info.ino_table);
- np->cwq_info.ino_table = NULL;
- }
+ kfree(np->cwq_info.ino_table);
+ np->cwq_info.ino_table = NULL;
kfree(np);
}
@@ -2079,10 +2080,8 @@ static struct n2_mau *alloc_ncp(void)
static void free_ncp(struct n2_mau *mp)
{
- if (mp->mau_info.ino_table) {
- kfree(mp->mau_info.ino_table);
- mp->mau_info.ino_table = NULL;
- }
+ kfree(mp->mau_info.ino_table);
+ mp->mau_info.ino_table = NULL;
kfree(mp);
}
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 874ddf5e9087..1e87637c412d 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -46,7 +46,6 @@ struct nx842_workmem {
ktime_t start;
- struct vas_window *txwin; /* Used with VAS function */
char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */
} __packed __aligned(WORKMEM_ALIGN);
@@ -65,7 +64,7 @@ struct nx842_coproc {
* Send the request to NX engine on the chip for the corresponding CPU
* where the process is executing. Use with VAS function.
*/
-static DEFINE_PER_CPU(struct nx842_coproc *, coproc_inst);
+static DEFINE_PER_CPU(struct vas_window *, cpu_txwin);
/* no cpu hotplug on powernv, so this list never changes after init */
static LIST_HEAD(nx842_coprocs);
@@ -193,7 +192,7 @@ static int wait_for_csb(struct nx842_workmem *wmem,
ktime_t start = wmem->start, now = ktime_get();
ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX);
- while (!(ACCESS_ONCE(csb->flags) & CSB_V)) {
+ while (!(READ_ONCE(csb->flags) & CSB_V)) {
cpu_relax();
now = ktime_get();
if (ktime_after(now, timeout))
@@ -586,16 +585,11 @@ static int nx842_exec_vas(const unsigned char *in, unsigned int inlen,
ccw = SET_FIELD(CCW_FC_842, ccw, fc);
crb->ccw = cpu_to_be32(ccw);
- txwin = wmem->txwin;
- /* shoudn't happen, we don't load without a coproc */
- if (!txwin) {
- pr_err_ratelimited("NX-842 coprocessor is not available");
- return -ENODEV;
- }
-
do {
wmem->start = ktime_get();
preempt_disable();
+ txwin = this_cpu_read(cpu_txwin);
+
/*
* VAS copy CRB into L2 cache. Refer <asm/vas.h>.
* @crb and @offset.
@@ -689,25 +683,6 @@ static inline void nx842_add_coprocs_list(struct nx842_coproc *coproc,
list_add(&coproc->list, &nx842_coprocs);
}
-/*
- * Identify chip ID for each CPU and save coprocesor adddress for the
- * corresponding NX engine in percpu coproc_inst.
- * coproc_inst is used in crypto_init to open send window on the NX instance
- * for the corresponding CPU / chip where the open request is executed.
- */
-static void nx842_set_per_cpu_coproc(struct nx842_coproc *coproc)
-{
- unsigned int i, chip_id;
-
- for_each_possible_cpu(i) {
- chip_id = cpu_to_chip_id(i);
-
- if (coproc->chip_id == chip_id)
- per_cpu(coproc_inst, i) = coproc;
- }
-}
-
-
static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc)
{
struct vas_window *txwin = NULL;
@@ -725,15 +700,58 @@ static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc)
* Open a VAS send window which is used to send request to NX.
*/
txwin = vas_tx_win_open(coproc->vas.id, coproc->ct, &txattr);
- if (IS_ERR(txwin)) {
+ if (IS_ERR(txwin))
pr_err("ibm,nx-842: Can not open TX window: %ld\n",
PTR_ERR(txwin));
- return NULL;
- }
return txwin;
}
+/*
+ * Identify chip ID for each CPU, open send window for the corresponding NX
+ * engine and save txwin in percpu cpu_txwin.
+ * cpu_txwin is used in copy/paste operation for each compression /
+ * decompression request.
+ */
+static int nx842_open_percpu_txwins(void)
+{
+ struct nx842_coproc *coproc, *n;
+ unsigned int i, chip_id;
+
+ for_each_possible_cpu(i) {
+ struct vas_window *txwin = NULL;
+
+ chip_id = cpu_to_chip_id(i);
+
+ list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
+ /*
+ * Kernel requests use only high priority FIFOs. So
+ * open send windows for these FIFOs.
+ */
+
+ if (coproc->ct != VAS_COP_TYPE_842_HIPRI)
+ continue;
+
+ if (coproc->chip_id == chip_id) {
+ txwin = nx842_alloc_txwin(coproc);
+ if (IS_ERR(txwin))
+ return PTR_ERR(txwin);
+
+ per_cpu(cpu_txwin, i) = txwin;
+ break;
+ }
+ }
+
+ if (!per_cpu(cpu_txwin, i)) {
+ /* shouldn't happen, each chip will have an NX engine */
+ pr_err("NX engine is not available for CPU %d\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
int vasid)
{
@@ -819,14 +837,6 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
coproc->vas.id = vasid;
nx842_add_coprocs_list(coproc, chip_id);
- /*
- * Kernel requests use only high priority FIFOs. So save coproc
- * info in percpu coproc_inst which will be used to open send
- * windows for crypto open requests later.
- */
- if (coproc->ct == VAS_COP_TYPE_842_HIPRI)
- nx842_set_per_cpu_coproc(coproc);
-
return 0;
err_out:
@@ -847,24 +857,12 @@ static int __init nx842_powernv_probe_vas(struct device_node *pn)
return -EINVAL;
}
- for_each_compatible_node(dn, NULL, "ibm,power9-vas-x") {
- if (of_get_ibm_chip_id(dn) == chip_id)
- break;
- }
-
- if (!dn) {
- pr_err("Missing VAS device node\n");
+ vasid = chip_to_vas_id(chip_id);
+ if (vasid < 0) {
+ pr_err("Unable to map chip_id %d to vasid\n", chip_id);
return -EINVAL;
}
- if (of_property_read_u32(dn, "ibm,vas-id", &vasid)) {
- pr_err("Missing ibm,vas-id device property\n");
- of_node_put(dn);
- return -EINVAL;
- }
-
- of_node_put(dn);
-
for_each_child_of_node(pn, dn) {
if (of_device_is_compatible(dn, "ibm,p9-nx-842")) {
ret = vas_cfg_coproc_info(dn, chip_id, vasid);
@@ -928,6 +926,19 @@ static int __init nx842_powernv_probe(struct device_node *dn)
static void nx842_delete_coprocs(void)
{
struct nx842_coproc *coproc, *n;
+ struct vas_window *txwin;
+ int i;
+
+ /*
+ * close percpu txwins that are opened for the corresponding coproc.
+ */
+ for_each_possible_cpu(i) {
+ txwin = per_cpu(cpu_txwin, i);
+ if (txwin)
+ vas_win_close(txwin);
+
+ per_cpu(cpu_txwin, i) = 0;
+ }
list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
if (coproc->vas.rxwin)
@@ -954,46 +965,6 @@ static struct nx842_driver nx842_powernv_driver = {
.decompress = nx842_powernv_decompress,
};
-static int nx842_powernv_crypto_init_vas(struct crypto_tfm *tfm)
-{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
- struct nx842_workmem *wmem;
- struct nx842_coproc *coproc;
- int ret;
-
- ret = nx842_crypto_init(tfm, &nx842_powernv_driver);
-
- if (ret)
- return ret;
-
- wmem = PTR_ALIGN((struct nx842_workmem *)ctx->wmem, WORKMEM_ALIGN);
- coproc = per_cpu(coproc_inst, smp_processor_id());
-
- ret = -EINVAL;
- if (coproc && coproc->vas.rxwin) {
- wmem->txwin = nx842_alloc_txwin(coproc);
- if (!IS_ERR(wmem->txwin))
- return 0;
-
- ret = PTR_ERR(wmem->txwin);
- }
-
- return ret;
-}
-
-void nx842_powernv_crypto_exit_vas(struct crypto_tfm *tfm)
-{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
- struct nx842_workmem *wmem;
-
- wmem = PTR_ALIGN((struct nx842_workmem *)ctx->wmem, WORKMEM_ALIGN);
-
- if (wmem && wmem->txwin)
- vas_win_close(wmem->txwin);
-
- nx842_crypto_exit(tfm);
-}
-
static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
{
return nx842_crypto_init(tfm, &nx842_powernv_driver);
@@ -1044,9 +1015,13 @@ static __init int nx842_powernv_init(void)
nx842_powernv_exec = nx842_exec_icswx;
} else {
+ ret = nx842_open_percpu_txwins();
+ if (ret) {
+ nx842_delete_coprocs();
+ return ret;
+ }
+
nx842_powernv_exec = nx842_exec_vas;
- nx842_powernv_alg.cra_init = nx842_powernv_crypto_init_vas;
- nx842_powernv_alg.cra_exit = nx842_powernv_crypto_exit_vas;
}
ret = crypto_register_alg(&nx842_powernv_alg);
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index cddc6d8b55d9..bf52cd1d7fca 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -1082,7 +1082,7 @@ static int nx842_remove(struct vio_dev *viodev)
return 0;
}
-static struct vio_device_id nx842_vio_driver_ids[] = {
+static const struct vio_device_id nx842_vio_driver_ids[] = {
{"ibm,compression-v1", "ibm,compression"},
{"", ""},
};
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index da3cb8c35ec7..d94e25df503b 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -116,7 +116,7 @@ int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
spin_lock_init(&ctx->lock);
ctx->driver = driver;
- ctx->wmem = kzalloc(driver->workmem_size, GFP_KERNEL);
+ ctx->wmem = kmalloc(driver->workmem_size, GFP_KERNEL);
ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index abd465f479c4..a810596b97c2 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -22,6 +22,7 @@
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
+#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -433,7 +434,7 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
char *iv = rctx->iv;
- memcpy(iv, req->iv, 12);
+ memcpy(iv, req->iv, GCM_AES_IV_SIZE);
return gcm_aes_nx_crypt(req, 1, req->assoclen);
}
@@ -443,7 +444,7 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
char *iv = rctx->iv;
- memcpy(iv, req->iv, 12);
+ memcpy(iv, req->iv, GCM_AES_IV_SIZE);
return gcm_aes_nx_crypt(req, 0, req->assoclen);
}
@@ -498,7 +499,7 @@ struct aead_alg nx_gcm_aes_alg = {
},
.init = nx_crypto_ctx_aes_gcm_init,
.exit = nx_crypto_ctx_aead_exit,
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = gcm_aes_nx_set_key,
.encrypt = gcm_aes_nx_encrypt,
@@ -516,7 +517,7 @@ struct aead_alg nx_gcm4106_aes_alg = {
},
.init = nx_crypto_ctx_aes_gcm_init,
.exit = nx_crypto_ctx_aead_exit,
- .ivsize = 8,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = gcm4106_aes_nx_set_key,
.setauthsize = gcm4106_aes_nx_setauthsize,
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 036057abb257..3a5e31be4764 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -833,7 +833,7 @@ static void __exit nx_fini(void)
vio_unregister_driver(&nx_driver.viodriver);
}
-static struct vio_device_id nx_crypto_driver_ids[] = {
+static const struct vio_device_id nx_crypto_driver_ids[] = {
{ "ibm,sym-encryption-v1", "ibm,sym-encryption" },
{ "", "" }
};
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index 7d4f8a4be6d8..0cc3b65d7162 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -18,6 +18,7 @@
#include <linux/omap-dma.h>
#include <linux/interrupt.h>
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
@@ -186,7 +187,7 @@ static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
if (!sk_req) {
pr_err("skcipher: Failed to allocate request\n");
- return -1;
+ return -ENOMEM;
}
init_completion(&result.completion);
@@ -214,7 +215,7 @@ static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
}
/* fall through */
default:
- pr_err("Encryption of IV failed for GCM mode");
+ pr_err("Encryption of IV failed for GCM mode\n");
break;
}
@@ -311,7 +312,7 @@ static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
int err, assoclen;
memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
- memcpy(rctx->iv + 12, &counter, 4);
+ memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4);
err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
if (err)
@@ -339,7 +340,7 @@ int omap_aes_gcm_encrypt(struct aead_request *req)
{
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
- memcpy(rctx->iv, req->iv, 12);
+ memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
}
@@ -347,7 +348,7 @@ int omap_aes_gcm_decrypt(struct aead_request *req)
{
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
- memcpy(rctx->iv, req->iv, 12);
+ memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
return omap_aes_gcm_crypt(req, FLAGS_GCM);
}
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index c376a3ee7c2c..fbec0a2e76dd 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -35,6 +35,7 @@
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/aead.h>
@@ -767,7 +768,7 @@ static struct aead_alg algs_aead_gcm[] = {
},
.init = omap_aes_gcm_cra_init,
.exit = omap_aes_gcm_cra_exit,
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = omap_aes_gcm_setkey,
.encrypt = omap_aes_gcm_encrypt,
@@ -788,7 +789,7 @@ static struct aead_alg algs_aead_gcm[] = {
.init = omap_aes_gcm_cra_init,
.exit = omap_aes_gcm_cra_exit,
.maxauthsize = AES_BLOCK_SIZE,
- .ivsize = 8,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.setkey = omap_aes_4106gcm_setkey,
.encrypt = omap_aes_4106gcm_encrypt,
.decrypt = omap_aes_4106gcm_decrypt,
@@ -974,11 +975,10 @@ static int omap_aes_get_res_of(struct omap_aes_dev *dd,
struct device *dev, struct resource *res)
{
struct device_node *node = dev->of_node;
- const struct of_device_id *match;
int err = 0;
- match = of_match_device(of_match_ptr(omap_aes_of_match), dev);
- if (!match) {
+ dd->pdata = of_device_get_match_data(dev);
+ if (!dd->pdata) {
dev_err(dev, "no compatible OF match\n");
err = -EINVAL;
goto err;
@@ -991,8 +991,6 @@ static int omap_aes_get_res_of(struct omap_aes_dev *dd,
goto err;
}
- dd->pdata = match->data;
-
err:
return err;
}
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index d37c9506c36c..ebc5c0f11f03 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -928,16 +928,13 @@ MODULE_DEVICE_TABLE(of, omap_des_of_match);
static int omap_des_get_of(struct omap_des_dev *dd,
struct platform_device *pdev)
{
- const struct of_device_id *match;
- match = of_match_device(of_match_ptr(omap_des_of_match), &pdev->dev);
- if (!match) {
+ dd->pdata = of_device_get_match_data(&pdev->dev);
+ if (!dd->pdata) {
dev_err(&pdev->dev, "no compatible OF match\n");
return -EINVAL;
}
- dd->pdata = match->data;
-
return 0;
}
#else
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index c40ac30ec002..86b89ace836f 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1944,11 +1944,10 @@ static int omap_sham_get_res_of(struct omap_sham_dev *dd,
struct device *dev, struct resource *res)
{
struct device_node *node = dev->of_node;
- const struct of_device_id *match;
int err = 0;
- match = of_match_device(of_match_ptr(omap_sham_of_match), dev);
- if (!match) {
+ dd->pdata = of_device_get_match_data(dev);
+ if (!dd->pdata) {
dev_err(dev, "no compatible OF match\n");
err = -EINVAL;
goto err;
@@ -1968,8 +1967,6 @@ static int omap_sham_get_res_of(struct omap_sham_dev *dd,
goto err;
}
- dd->pdata = match->data;
-
err:
return err;
}
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index b3869748cc6b..1c6cbda56afe 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -482,7 +482,7 @@ static struct crypto_alg cbc_aes_alg = {
}
};
-static struct x86_cpu_id padlock_cpu_id[] = {
+static const struct x86_cpu_id padlock_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
{}
};
@@ -512,7 +512,7 @@ static int __init padlock_init(void)
printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
- if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
+ if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index bc72d20c32c3..d32c79328876 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -509,7 +509,7 @@ static struct shash_alg sha256_alg_nano = {
}
};
-static struct x86_cpu_id padlock_sha_ids[] = {
+static const struct x86_cpu_id padlock_sha_ids[] = {
X86_FEATURE_MATCH(X86_FEATURE_PHE),
{}
};
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index b6f14844702e..4ef52c9d72fc 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1125,9 +1125,9 @@ static irqreturn_t spacc_spacc_irq(int irq, void *dev)
return IRQ_HANDLED;
}
-static void spacc_packet_timeout(unsigned long data)
+static void spacc_packet_timeout(struct timer_list *t)
{
- struct spacc_engine *engine = (struct spacc_engine *)data;
+ struct spacc_engine *engine = from_timer(engine, t, packet_timeout);
spacc_process_done(engine);
}
@@ -1618,7 +1618,7 @@ MODULE_DEVICE_TABLE(of, spacc_of_id_table);
static int spacc_probe(struct platform_device *pdev)
{
- int i, err, ret = -EINVAL;
+ int i, err, ret;
struct resource *mem, *irq;
struct device_node *np = pdev->dev.of_node;
struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
@@ -1679,22 +1679,18 @@ static int spacc_probe(struct platform_device *pdev)
engine->clk = clk_get(&pdev->dev, "ref");
if (IS_ERR(engine->clk)) {
dev_info(&pdev->dev, "clk unavailable\n");
- device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
return PTR_ERR(engine->clk);
}
if (clk_prepare_enable(engine->clk)) {
dev_info(&pdev->dev, "unable to prepare/enable clk\n");
- clk_put(engine->clk);
- return -EIO;
+ ret = -EIO;
+ goto err_clk_put;
}
- err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
- if (err) {
- clk_disable_unprepare(engine->clk);
- clk_put(engine->clk);
- return err;
- }
+ ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+ if (ret)
+ goto err_clk_disable;
/*
@@ -1714,8 +1710,7 @@ static int spacc_probe(struct platform_device *pdev)
writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN,
engine->regs + SPA_IRQ_EN_REG_OFFSET);
- setup_timer(&engine->packet_timeout, spacc_packet_timeout,
- (unsigned long)engine);
+ timer_setup(&engine->packet_timeout, spacc_packet_timeout, 0);
INIT_LIST_HEAD(&engine->pending);
INIT_LIST_HEAD(&engine->completed);
@@ -1726,6 +1721,7 @@ static int spacc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, engine);
+ ret = -EINVAL;
INIT_LIST_HEAD(&engine->registered_algs);
for (i = 0; i < engine->num_algs; ++i) {
engine->algs[i].engine = engine;
@@ -1760,6 +1756,16 @@ static int spacc_probe(struct platform_device *pdev)
engine->aeads[i].alg.base.cra_name);
}
+ if (!ret)
+ return 0;
+
+ del_timer_sync(&engine->packet_timeout);
+ device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+err_clk_disable:
+ clk_disable_unprepare(engine->clk);
+err_clk_put:
+ clk_put(engine->clk);
+
return ret;
}
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
index 8afac52677a6..2d06409bd3c4 100644
--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -228,11 +228,8 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
list_add_tail(&map->list, &vfs_table);
} else if (accel_dev->is_vf && pf) {
/* VF on host */
- struct adf_accel_vf_info *vf_info;
struct vf_id_map *map;
- vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev);
-
map = adf_find_vf(adf_get_vf_num(accel_dev));
if (map) {
struct vf_id_map *next;
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 6f5dd68449c6..13c52d6bf630 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -443,9 +443,6 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
- if (unlikely(!params->p || !params->g))
- return -EINVAL;
-
if (qat_dh_check_params_length(params->p_size << 3))
return -EINVAL;
@@ -462,11 +459,8 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
}
ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
- if (!ctx->g) {
- dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
- ctx->p = NULL;
+ if (!ctx->g)
return -ENOMEM;
- }
memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
params->g_size);
@@ -507,18 +501,22 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
ret = qat_dh_set_params(ctx, &params);
if (ret < 0)
- return ret;
+ goto err_clear_ctx;
ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
GFP_KERNEL);
if (!ctx->xa) {
- qat_dh_clear_ctx(dev, ctx);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_clear_ctx;
}
memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
params.key_size);
return 0;
+
+err_clear_ctx:
+ qat_dh_clear_ctx(dev, ctx);
+ return ret;
}
static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 8c4fd255a601..ff149e176f64 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -117,19 +117,19 @@ void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
#define CSR_RETRY_TIMES 500
static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
- unsigned char ae, unsigned int csr,
- unsigned int *value)
+ unsigned char ae, unsigned int csr)
{
unsigned int iterations = CSR_RETRY_TIMES;
+ int value;
do {
- *value = GET_AE_CSR(handle, ae, csr);
+ value = GET_AE_CSR(handle, ae, csr);
if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
- return 0;
+ return value;
} while (iterations--);
pr_err("QAT: Read CSR timeout\n");
- return -EFAULT;
+ return 0;
}
static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
@@ -154,9 +154,9 @@ static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
{
unsigned int cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
- qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
+ *events = qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
@@ -169,13 +169,13 @@ static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
int times = MAX_RETRY_TIMES;
int elapsed_cycles = 0;
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
+ base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
base_cnt &= 0xffff;
while ((int)cycles > elapsed_cycles && times--) {
if (chk_inactive)
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
+ cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
cur_cnt &= 0xffff;
elapsed_cycles = cur_cnt - base_cnt;
@@ -207,7 +207,7 @@ int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
}
/* Sets the accelaration engine context mode to either four or eight */
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr = IGNORE_W1C_MASK & csr;
new_csr = (mode == 4) ?
SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
@@ -221,7 +221,7 @@ int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
{
unsigned int csr, new_csr;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr &= IGNORE_W1C_MASK;
new_csr = (mode) ?
@@ -240,7 +240,7 @@ int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
{
unsigned int csr, new_csr;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr &= IGNORE_W1C_MASK;
switch (lm_type) {
case ICP_LMEM0:
@@ -328,7 +328,7 @@ static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx, cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
@@ -340,16 +340,18 @@ static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
-static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
+static unsigned int qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx,
- unsigned int ae_csr, unsigned int *csr_val)
+ unsigned int ae_csr)
{
- unsigned int cur_ctx;
+ unsigned int cur_ctx, csr_val;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
- qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, ae_csr);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+
+ return csr_val;
}
static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
@@ -358,7 +360,7 @@ static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx, cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
continue;
@@ -374,7 +376,7 @@ static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx, cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
continue;
@@ -392,13 +394,11 @@ static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
int times = MAX_RETRY_TIMES;
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
- (unsigned int *)&base_cnt);
+ base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
base_cnt &= 0xffff;
do {
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
- (unsigned int *)&cur_cnt);
+ cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
cur_cnt &= 0xffff;
} while (times-- && (cur_cnt == base_cnt));
@@ -416,8 +416,8 @@ int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
{
unsigned int enable = 0, active = 0;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable);
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active);
+ enable = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ active = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
(active & (1 << ACS_ABO_BITPOS)))
return 1;
@@ -540,7 +540,7 @@ static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+ ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx &= IGNORE_W1C_MASK &
(~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
@@ -583,7 +583,7 @@ void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned int ustore_addr;
unsigned int i;
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
for (i = 0; i < words_num; i++) {
@@ -604,7 +604,7 @@ static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+ ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx &= IGNORE_W1C_MASK;
ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
ctx |= (ctx_mask << CE_ENABLE_BITPOS);
@@ -636,10 +636,10 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
int ret = 0;
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
- qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr_val &= IGNORE_W1C_MASK;
csr_val |= CE_NN_MODE;
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
@@ -648,7 +648,7 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
handle->hal_handle->upc_mask &
INIT_PC_VALUE);
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
qat_hal_wr_indr_csr(handle, ae, ctx_mask,
@@ -760,7 +760,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
unsigned int csr_val = 0;
- qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE);
csr_val |= 0x1;
qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
}
@@ -826,16 +826,16 @@ static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned int i, uwrd_lo, uwrd_hi;
unsigned int ustore_addr, misc_control;
- qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control);
+ misc_control = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
misc_control & 0xfffffffb);
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
for (i = 0; i < words_num; i++) {
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
uaddr++;
- qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo);
- qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi);
+ uwrd_lo = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER);
+ uwrd_hi = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER);
uword[i] = uwrd_hi;
uword[i] = (uword[i] << 0x20) | uwrd_lo;
}
@@ -849,7 +849,7 @@ void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
{
unsigned int i, ustore_addr;
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
for (i = 0; i < words_num; i++) {
@@ -890,26 +890,27 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
return -EINVAL;
}
/* save current context */
- qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
- qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
- qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
- &ind_lm_addr_byte0);
- qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
- &ind_lm_addr_byte1);
+ ind_lm_addr0 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT);
+ ind_lm_addr1 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT);
+ ind_lm_addr_byte0 = qat_hal_rd_indr_csr(handle, ae, ctx,
+ INDIRECT_LM_ADDR_0_BYTE_INDEX);
+ ind_lm_addr_byte1 = qat_hal_rd_indr_csr(handle, ae, ctx,
+ INDIRECT_LM_ADDR_1_BYTE_INDEX);
if (inst_num <= MAX_EXEC_INST)
qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
- qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
+ savpc = qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT);
savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
- qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
- qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
- qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
- &ind_cnt_sig);
- qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
- qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
+ savcc = qat_hal_rd_ae_csr(handle, ae, CC_ENABLE);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+ ctxarb_ctl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+ ind_cnt_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+ FUTURE_COUNT_SIGNAL_INDIRECT);
+ ind_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+ CTX_SIG_EVENTS_INDIRECT);
+ act_sig = qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE);
/* execute micro codes */
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
@@ -927,8 +928,8 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
if (endpc) {
unsigned int ctx_status;
- qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT,
- &ctx_status);
+ ctx_status = qat_hal_rd_indr_csr(handle, ae, ctx,
+ CTX_STS_INDIRECT);
*endpc = ctx_status & handle->hal_handle->upc_mask;
}
/* retore to saved context */
@@ -938,7 +939,7 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
handle->hal_handle->upc_mask & savpc);
- qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
@@ -986,16 +987,16 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
break;
}
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
- qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl);
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+ ctxarb_cntl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
if (ctx != (savctx & ACS_ACNO))
qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
ctx & ACS_ACNO);
qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr = UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
insts = qat_hal_set_uword_ecc(insts);
@@ -1011,7 +1012,7 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
* the instruction should have been executed
* prior to clearing the ECS in putUwords
*/
- qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data);
+ *data = qat_hal_rd_ae_csr(handle, ae, ALU_OUT);
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
if (ctx != (savctx & ACS_ACNO))
@@ -1188,7 +1189,7 @@ static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
unsigned short mask;
unsigned short dr_offset = 0x10;
- status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ status = ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (CE_INUSE_CONTEXTS & ctx_enables) {
if (ctx & 0x1) {
pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
@@ -1238,7 +1239,7 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
const unsigned short gprnum = 0, dly = num_inst * 0x5;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (CE_INUSE_CONTEXTS & ctx_enables) {
if (ctx & 0x1) {
pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
@@ -1282,7 +1283,7 @@ static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
unsigned int ctx_enables;
int stat = 0;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
@@ -1299,7 +1300,7 @@ static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
{
unsigned int ctx_enables;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (ctx_enables & CE_INUSE_CONTEXTS) {
/* 4-ctx mode */
*relreg = absreg_num & 0x1F;
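
The qat_hal hunks above all apply one mechanical conversion: CSR read helpers that used to return a status and fill an out-parameter now return the value directly, so each call site collapses to a single assignment. A minimal sketch of the pattern, using hypothetical names rather than the real qat_hal prototypes:

	/* Before: value comes back through an out-parameter, status is always 0. */
	int csr_read_old(void __iomem *base, unsigned int offset, unsigned int *value)
	{
		*value = readl(base + offset);
		return 0;
	}

	/* After: the register value is the return value. */
	unsigned int csr_read_new(void __iomem *base, unsigned int offset)
	{
		return readl(base + offset);
	}
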
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index e2454d90d949..98d22c2096e3 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -567,26 +567,26 @@ qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
code_page->imp_expr_tab_offset);
if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
imp_expr_tab->entry_num) {
- pr_err("QAT: UOF can't contain imported variable to be parsed");
+ pr_err("QAT: UOF can't contain imported variable to be parsed\n");
return -EINVAL;
}
neigh_reg_tab = (struct icp_qat_uof_objtable *)
(encap_uof_obj->beg_uof +
code_page->neigh_reg_tab_offset);
if (neigh_reg_tab->entry_num) {
- pr_err("QAT: UOF can't contain shared control store feature");
+ pr_err("QAT: UOF can't contain shared control store feature\n");
return -EINVAL;
}
if (image->numpages > 1) {
- pr_err("QAT: UOF can't contain multiple pages");
+ pr_err("QAT: UOF can't contain multiple pages\n");
return -EINVAL;
}
if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
- pr_err("QAT: UOF can't use shared control store feature");
+ pr_err("QAT: UOF can't use shared control store feature\n");
return -EFAULT;
}
if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
- pr_err("QAT: UOF can't use reloadable feature");
+ pr_err("QAT: UOF can't use reloadable feature\n");
return -EFAULT;
}
return 0;
@@ -702,7 +702,7 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
}
}
if (!mflag) {
- pr_err("QAT: uimage uses AE not set");
+ pr_err("QAT: uimage uses AE not set\n");
return -EINVAL;
}
return 0;
@@ -791,6 +791,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
case ICP_GPA_ABS:
case ICP_GPB_ABS:
ctx_mask = 0;
+ /* fall through */
case ICP_GPA_REL:
case ICP_GPB_REL:
return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
@@ -800,6 +801,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
case ICP_SR_RD_ABS:
case ICP_DR_RD_ABS:
ctx_mask = 0;
+ /* fall through */
case ICP_SR_REL:
case ICP_DR_REL:
case ICP_SR_RD_REL:
@@ -809,6 +811,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
case ICP_SR_WR_ABS:
case ICP_DR_WR_ABS:
ctx_mask = 0;
+ /* fall through */
case ICP_SR_WR_REL:
case ICP_DR_WR_REL:
return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
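
The /* fall through */ comments added above mark the spots where dropping from an ABS case into the following REL case is intentional (the ABS cases only clear ctx_mask first); tools that warn on implicit fallthrough, such as GCC's -Wimplicit-fallthrough, accept such comments as an annotation. A generic sketch of the idiom with hypothetical names, not the actual qat_uclo.c code:

	switch (reg_type) {
	case REG_ABS:
		ctx_mask = 0;
		/* fall through */
	case REG_REL:
		return init_reg(handle, ae, ctx_mask, reg_type, value);
	default:
		return -EINVAL;
	}
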
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index b04b42f48366..ea4d96bf47e8 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -248,10 +248,7 @@ static int qce_ablkcipher_init(struct crypto_tfm *tfm)
ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0,
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->fallback))
- return PTR_ERR(ctx->fallback);
-
- return 0;
+ return PTR_ERR_OR_ZERO(ctx->fallback);
}
static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
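
PTR_ERR_OR_ZERO() lives in <linux/err.h> and folds the usual IS_ERR()/PTR_ERR() check into a single expression, so the change above is behaviour-preserving. The helper is roughly equivalent to:

	static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		return 0;
	}
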
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 47e114ac09d0..53227d70d397 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -349,28 +349,12 @@ static int qce_ahash_digest(struct ahash_request *req)
return qce->async_req_enqueue(tmpl->qce, &req->base);
}
-struct qce_ahash_result {
- struct completion completion;
- int error;
-};
-
-static void qce_digest_complete(struct crypto_async_request *req, int error)
-{
- struct qce_ahash_result *result = req->data;
-
- if (error == -EINPROGRESS)
- return;
-
- result->error = error;
- complete(&result->completion);
-}
-
static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
unsigned int digestsize = crypto_ahash_digestsize(tfm);
struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
- struct qce_ahash_result result;
+ struct crypto_wait wait;
struct ahash_request *req;
struct scatterlist sg;
unsigned int blocksize;
@@ -405,9 +389,9 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
goto err_free_ahash;
}
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- qce_digest_complete, &result);
+ crypto_req_done, &wait);
crypto_ahash_clear_flags(ahash_tfm, ~0);
buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
@@ -420,13 +404,7 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
sg_init_one(&sg, buf, keylen);
ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);
- ret = crypto_ahash_digest(req);
- if (ret == -EINPROGRESS || ret == -EBUSY) {
- ret = wait_for_completion_interruptible(&result.completion);
- if (!ret)
- ret = result.error;
- }
-
+ ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
if (ret)
crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
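
The setkey conversion above drops the driver-private qce_ahash_result/completion pair in favour of the generic crypto_wait_req() helpers. The usual pattern for waiting synchronously on an async hash request looks roughly like this (sketch, error handling trimmed):

	struct crypto_wait wait;
	int ret;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
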
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 7ac657f46d15..5d64c08b7f47 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1,15 +1,13 @@
-/*
- * Cryptographic API.
- *
- * Support for Samsung S5PV210 HW acceleration.
- *
- * Copyright (C) 2011 NetUP Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Cryptographic API.
+//
+// Support for Samsung S5PV210 and Exynos HW acceleration.
+//
+// Copyright (C) 2011 NetUP Inc. All rights reserved.
+// Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
+//
+// Hash part based on omap-sham.c driver.
#include <linux/clk.h>
#include <linux/crypto.h>
@@ -30,98 +28,112 @@
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
-#define _SBF(s, v) ((v) << (s))
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <crypto/internal/hash.h>
+
+#define _SBF(s, v) ((v) << (s))
/* Feed control registers */
-#define SSS_REG_FCINTSTAT 0x0000
-#define SSS_FCINTSTAT_BRDMAINT BIT(3)
-#define SSS_FCINTSTAT_BTDMAINT BIT(2)
-#define SSS_FCINTSTAT_HRDMAINT BIT(1)
-#define SSS_FCINTSTAT_PKDMAINT BIT(0)
-
-#define SSS_REG_FCINTENSET 0x0004
-#define SSS_FCINTENSET_BRDMAINTENSET BIT(3)
-#define SSS_FCINTENSET_BTDMAINTENSET BIT(2)
-#define SSS_FCINTENSET_HRDMAINTENSET BIT(1)
-#define SSS_FCINTENSET_PKDMAINTENSET BIT(0)
-
-#define SSS_REG_FCINTENCLR 0x0008
-#define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3)
-#define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2)
-#define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1)
-#define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0)
-
-#define SSS_REG_FCINTPEND 0x000C
-#define SSS_FCINTPEND_BRDMAINTP BIT(3)
-#define SSS_FCINTPEND_BTDMAINTP BIT(2)
-#define SSS_FCINTPEND_HRDMAINTP BIT(1)
-#define SSS_FCINTPEND_PKDMAINTP BIT(0)
-
-#define SSS_REG_FCFIFOSTAT 0x0010
-#define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7)
-#define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6)
-#define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5)
-#define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4)
-#define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3)
-#define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2)
-#define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1)
-#define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0)
-
-#define SSS_REG_FCFIFOCTRL 0x0014
-#define SSS_FCFIFOCTRL_DESSEL BIT(2)
-#define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
-#define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
-#define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
-
-#define SSS_REG_FCBRDMAS 0x0020
-#define SSS_REG_FCBRDMAL 0x0024
-#define SSS_REG_FCBRDMAC 0x0028
-#define SSS_FCBRDMAC_BYTESWAP BIT(1)
-#define SSS_FCBRDMAC_FLUSH BIT(0)
-
-#define SSS_REG_FCBTDMAS 0x0030
-#define SSS_REG_FCBTDMAL 0x0034
-#define SSS_REG_FCBTDMAC 0x0038
-#define SSS_FCBTDMAC_BYTESWAP BIT(1)
-#define SSS_FCBTDMAC_FLUSH BIT(0)
-
-#define SSS_REG_FCHRDMAS 0x0040
-#define SSS_REG_FCHRDMAL 0x0044
-#define SSS_REG_FCHRDMAC 0x0048
-#define SSS_FCHRDMAC_BYTESWAP BIT(1)
-#define SSS_FCHRDMAC_FLUSH BIT(0)
-
-#define SSS_REG_FCPKDMAS 0x0050
-#define SSS_REG_FCPKDMAL 0x0054
-#define SSS_REG_FCPKDMAC 0x0058
-#define SSS_FCPKDMAC_BYTESWAP BIT(3)
-#define SSS_FCPKDMAC_DESCEND BIT(2)
-#define SSS_FCPKDMAC_TRANSMIT BIT(1)
-#define SSS_FCPKDMAC_FLUSH BIT(0)
-
-#define SSS_REG_FCPKDMAO 0x005C
+#define SSS_REG_FCINTSTAT 0x0000
+#define SSS_FCINTSTAT_HPARTINT BIT(7)
+#define SSS_FCINTSTAT_HDONEINT BIT(5)
+#define SSS_FCINTSTAT_BRDMAINT BIT(3)
+#define SSS_FCINTSTAT_BTDMAINT BIT(2)
+#define SSS_FCINTSTAT_HRDMAINT BIT(1)
+#define SSS_FCINTSTAT_PKDMAINT BIT(0)
+
+#define SSS_REG_FCINTENSET 0x0004
+#define SSS_FCINTENSET_HPARTINTENSET BIT(7)
+#define SSS_FCINTENSET_HDONEINTENSET BIT(5)
+#define SSS_FCINTENSET_BRDMAINTENSET BIT(3)
+#define SSS_FCINTENSET_BTDMAINTENSET BIT(2)
+#define SSS_FCINTENSET_HRDMAINTENSET BIT(1)
+#define SSS_FCINTENSET_PKDMAINTENSET BIT(0)
+
+#define SSS_REG_FCINTENCLR 0x0008
+#define SSS_FCINTENCLR_HPARTINTENCLR BIT(7)
+#define SSS_FCINTENCLR_HDONEINTENCLR BIT(5)
+#define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3)
+#define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2)
+#define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1)
+#define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0)
+
+#define SSS_REG_FCINTPEND 0x000C
+#define SSS_FCINTPEND_HPARTINTP BIT(7)
+#define SSS_FCINTPEND_HDONEINTP BIT(5)
+#define SSS_FCINTPEND_BRDMAINTP BIT(3)
+#define SSS_FCINTPEND_BTDMAINTP BIT(2)
+#define SSS_FCINTPEND_HRDMAINTP BIT(1)
+#define SSS_FCINTPEND_PKDMAINTP BIT(0)
+
+#define SSS_REG_FCFIFOSTAT 0x0010
+#define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7)
+#define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6)
+#define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5)
+#define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4)
+#define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3)
+#define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2)
+#define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1)
+#define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0)
+
+#define SSS_REG_FCFIFOCTRL 0x0014
+#define SSS_FCFIFOCTRL_DESSEL BIT(2)
+#define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
+#define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
+#define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
+#define SSS_HASHIN_MASK _SBF(0, 0x03)
+
+#define SSS_REG_FCBRDMAS 0x0020
+#define SSS_REG_FCBRDMAL 0x0024
+#define SSS_REG_FCBRDMAC 0x0028
+#define SSS_FCBRDMAC_BYTESWAP BIT(1)
+#define SSS_FCBRDMAC_FLUSH BIT(0)
+
+#define SSS_REG_FCBTDMAS 0x0030
+#define SSS_REG_FCBTDMAL 0x0034
+#define SSS_REG_FCBTDMAC 0x0038
+#define SSS_FCBTDMAC_BYTESWAP BIT(1)
+#define SSS_FCBTDMAC_FLUSH BIT(0)
+
+#define SSS_REG_FCHRDMAS 0x0040
+#define SSS_REG_FCHRDMAL 0x0044
+#define SSS_REG_FCHRDMAC 0x0048
+#define SSS_FCHRDMAC_BYTESWAP BIT(1)
+#define SSS_FCHRDMAC_FLUSH BIT(0)
+
+#define SSS_REG_FCPKDMAS 0x0050
+#define SSS_REG_FCPKDMAL 0x0054
+#define SSS_REG_FCPKDMAC 0x0058
+#define SSS_FCPKDMAC_BYTESWAP BIT(3)
+#define SSS_FCPKDMAC_DESCEND BIT(2)
+#define SSS_FCPKDMAC_TRANSMIT BIT(1)
+#define SSS_FCPKDMAC_FLUSH BIT(0)
+
+#define SSS_REG_FCPKDMAO 0x005C
/* AES registers */
#define SSS_REG_AES_CONTROL 0x00
-#define SSS_AES_BYTESWAP_DI BIT(11)
-#define SSS_AES_BYTESWAP_DO BIT(10)
-#define SSS_AES_BYTESWAP_IV BIT(9)
-#define SSS_AES_BYTESWAP_CNT BIT(8)
-#define SSS_AES_BYTESWAP_KEY BIT(7)
-#define SSS_AES_KEY_CHANGE_MODE BIT(6)
-#define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
-#define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
-#define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
-#define SSS_AES_FIFO_MODE BIT(3)
-#define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
-#define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
-#define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
-#define SSS_AES_MODE_DECRYPT BIT(0)
+#define SSS_AES_BYTESWAP_DI BIT(11)
+#define SSS_AES_BYTESWAP_DO BIT(10)
+#define SSS_AES_BYTESWAP_IV BIT(9)
+#define SSS_AES_BYTESWAP_CNT BIT(8)
+#define SSS_AES_BYTESWAP_KEY BIT(7)
+#define SSS_AES_KEY_CHANGE_MODE BIT(6)
+#define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
+#define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
+#define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
+#define SSS_AES_FIFO_MODE BIT(3)
+#define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
+#define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
+#define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
+#define SSS_AES_MODE_DECRYPT BIT(0)
#define SSS_REG_AES_STATUS 0x04
-#define SSS_AES_BUSY BIT(2)
-#define SSS_AES_INPUT_READY BIT(1)
-#define SSS_AES_OUTPUT_READY BIT(0)
+#define SSS_AES_BUSY BIT(2)
+#define SSS_AES_INPUT_READY BIT(1)
+#define SSS_AES_OUTPUT_READY BIT(0)
#define SSS_REG_AES_IN_DATA(s) (0x10 + (s << 2))
#define SSS_REG_AES_OUT_DATA(s) (0x20 + (s << 2))
@@ -129,26 +141,97 @@
#define SSS_REG_AES_CNT_DATA(s) (0x40 + (s << 2))
#define SSS_REG_AES_KEY_DATA(s) (0x80 + (s << 2))
-#define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
-#define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
-#define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))
+#define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
+#define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
+#define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))
-#define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
+#define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
#define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \
SSS_AES_REG(dev, reg))
/* HW engine modes */
-#define FLAGS_AES_DECRYPT BIT(0)
-#define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
-#define FLAGS_AES_CBC _SBF(1, 0x01)
-#define FLAGS_AES_CTR _SBF(1, 0x02)
+#define FLAGS_AES_DECRYPT BIT(0)
+#define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
+#define FLAGS_AES_CBC _SBF(1, 0x01)
+#define FLAGS_AES_CTR _SBF(1, 0x02)
+
+#define AES_KEY_LEN 16
+#define CRYPTO_QUEUE_LEN 1
+
+/* HASH registers */
+#define SSS_REG_HASH_CTRL 0x00
+
+#define SSS_HASH_USER_IV_EN BIT(5)
+#define SSS_HASH_INIT_BIT BIT(4)
+#define SSS_HASH_ENGINE_SHA1 _SBF(1, 0x00)
+#define SSS_HASH_ENGINE_MD5 _SBF(1, 0x01)
+#define SSS_HASH_ENGINE_SHA256 _SBF(1, 0x02)
+
+#define SSS_HASH_ENGINE_MASK _SBF(1, 0x03)
+
+#define SSS_REG_HASH_CTRL_PAUSE 0x04
+
+#define SSS_HASH_PAUSE BIT(0)
+
+#define SSS_REG_HASH_CTRL_FIFO 0x08
+
+#define SSS_HASH_FIFO_MODE_DMA BIT(0)
+#define SSS_HASH_FIFO_MODE_CPU 0
-#define AES_KEY_LEN 16
-#define CRYPTO_QUEUE_LEN 1
+#define SSS_REG_HASH_CTRL_SWAP 0x0C
+
+#define SSS_HASH_BYTESWAP_DI BIT(3)
+#define SSS_HASH_BYTESWAP_DO BIT(2)
+#define SSS_HASH_BYTESWAP_IV BIT(1)
+#define SSS_HASH_BYTESWAP_KEY BIT(0)
+
+#define SSS_REG_HASH_STATUS 0x10
+
+#define SSS_HASH_STATUS_MSG_DONE BIT(6)
+#define SSS_HASH_STATUS_PARTIAL_DONE BIT(4)
+#define SSS_HASH_STATUS_BUFFER_READY BIT(0)
+
+#define SSS_REG_HASH_MSG_SIZE_LOW 0x20
+#define SSS_REG_HASH_MSG_SIZE_HIGH 0x24
+
+#define SSS_REG_HASH_PRE_MSG_SIZE_LOW 0x28
+#define SSS_REG_HASH_PRE_MSG_SIZE_HIGH 0x2C
+
+#define SSS_REG_HASH_IV(s) (0xB0 + ((s) << 2))
+#define SSS_REG_HASH_OUT(s) (0x100 + ((s) << 2))
+
+#define HASH_BLOCK_SIZE 64
+#define HASH_REG_SIZEOF 4
+#define HASH_MD5_MAX_REG (MD5_DIGEST_SIZE / HASH_REG_SIZEOF)
+#define HASH_SHA1_MAX_REG (SHA1_DIGEST_SIZE / HASH_REG_SIZEOF)
+#define HASH_SHA256_MAX_REG (SHA256_DIGEST_SIZE / HASH_REG_SIZEOF)
+
+/*
+ * HASH bit numbers, used by the device; set in dev->hash_flags with
+ * set_bit() or clear_bit(), and tested with test_bit() or BIT() masks, to
+ * keep the HASH state BUSY or FREE, or to signal state from the irq_handler
+ * to the hash_tasklet. The SGS flags track memory allocated for the scatterlist
+ */
+#define HASH_FLAGS_BUSY 0
+#define HASH_FLAGS_FINAL 1
+#define HASH_FLAGS_DMA_ACTIVE 2
+#define HASH_FLAGS_OUTPUT_READY 3
+#define HASH_FLAGS_DMA_READY 4
+#define HASH_FLAGS_SGS_COPIED 5
+#define HASH_FLAGS_SGS_ALLOCED 6
+
+/* HASH HW constants */
+#define BUFLEN HASH_BLOCK_SIZE
+
+#define SSS_HASH_DMA_LEN_ALIGN 8
+#define SSS_HASH_DMA_ALIGN_MASK (SSS_HASH_DMA_LEN_ALIGN - 1)
+
+#define SSS_HASH_QUEUE_LENGTH 10
/**
* struct samsung_aes_variant - platform specific SSS driver data
* @aes_offset: AES register offset from SSS module's base.
+ * @hash_offset: HASH register offset from SSS module's base.
*
* Specifies platform specific configuration of SSS module.
* Note: A structure for driver specific platform data is used for future
@@ -156,6 +239,7 @@
*/
struct samsung_aes_variant {
unsigned int aes_offset;
+ unsigned int hash_offset;
};
struct s5p_aes_reqctx {
@@ -195,6 +279,19 @@ struct s5p_aes_ctx {
* protects against concurrent access to these fields.
* @lock: Lock for protecting both access to device hardware registers
* and fields related to current request (including the busy field).
+ * @res: Resources for hash.
+ * @io_hash_base: Per-variant base address of the HASH block IO memory.
+ * @hash_lock: Lock for protecting hash_req, hash_queue and hash_flags
+ * variable.
+ * @hash_flags: Flags for current HASH op.
+ * @hash_queue: Async hash queue.
+ * @hash_tasklet: New HASH request scheduling job.
+ * @xmit_buf: Buffer for current HASH request transfer into SSS block.
+ * @hash_req: Current request sending to SSS HASH block.
+ * @hash_sg_iter: Scatterlist transferred through DMA into SSS HASH block.
+ * @hash_sg_cnt: Counter for hash_sg_iter.
+ *
+ * @use_hash: true if HASH algs enabled
*/
struct s5p_aes_dev {
struct device *dev;
@@ -215,16 +312,83 @@ struct s5p_aes_dev {
struct crypto_queue queue;
bool busy;
spinlock_t lock;
+
+ struct resource *res;
+ void __iomem *io_hash_base;
+
+ spinlock_t hash_lock; /* protect hash_ vars */
+ unsigned long hash_flags;
+ struct crypto_queue hash_queue;
+ struct tasklet_struct hash_tasklet;
+
+ u8 xmit_buf[BUFLEN];
+ struct ahash_request *hash_req;
+ struct scatterlist *hash_sg_iter;
+ unsigned int hash_sg_cnt;
+
+ bool use_hash;
};
-static struct s5p_aes_dev *s5p_dev;
+/**
+ * struct s5p_hash_reqctx - HASH request context
+ * @dd: Associated device
+ * @op_update: Current request operation (OP_UPDATE or OP_FINAL)
+ * @digcnt: Number of bytes processed by HW (without buffer[] ones)
+ * @digest: Digest message or IV for partial result
+ * @nregs: Number of HW registers for digest or IV read/write
+ * @engine: Bits for selecting type of HASH in SSS block
+ * @sg: sg for DMA transfer
+ * @sg_len: Length of sg for DMA transfer
+ * @sgl[]: sg for joining buffer and req->src scatterlist
+ * @skip: Skip offset in req->src for current op
+ * @total: Total number of bytes for current request
+ * @finup: Keep state for finup or final.
+ * @error: Keep track of error.
+ * @bufcnt: Number of bytes held in buffer[]
+ * @buffer[]: For byte(s) from end of req->src in UPDATE op
+ */
+struct s5p_hash_reqctx {
+ struct s5p_aes_dev *dd;
+ bool op_update;
+
+ u64 digcnt;
+ u8 digest[SHA256_DIGEST_SIZE];
+
+ unsigned int nregs; /* digest_size / sizeof(reg) */
+ u32 engine;
+
+ struct scatterlist *sg;
+ unsigned int sg_len;
+ struct scatterlist sgl[2];
+ unsigned int skip;
+ unsigned int total;
+ bool finup;
+ bool error;
+
+ u32 bufcnt;
+ u8 buffer[0];
+};
+
+/**
+ * struct s5p_hash_ctx - HASH transformation context
+ * @dd: Associated device
+ * @flags: Bits for algorithm HASH.
+ * @fallback: Software transformation for zero message or size < BUFLEN.
+ */
+struct s5p_hash_ctx {
+ struct s5p_aes_dev *dd;
+ unsigned long flags;
+ struct crypto_shash *fallback;
+};
static const struct samsung_aes_variant s5p_aes_data = {
.aes_offset = 0x4000,
+ .hash_offset = 0x6000,
};
static const struct samsung_aes_variant exynos_aes_data = {
.aes_offset = 0x200,
+ .hash_offset = 0x400,
};
static const struct of_device_id s5p_sss_dt_match[] = {
@@ -254,6 +418,8 @@ static inline struct samsung_aes_variant *find_s5p_sss_version
platform_get_device_id(pdev)->driver_data;
}
+static struct s5p_aes_dev *s5p_dev;
+
static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
@@ -436,15 +602,65 @@ static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
return ret;
}
+static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
+{
+ return __raw_readl(dd->io_hash_base + offset);
+}
+
+static inline void s5p_hash_write(struct s5p_aes_dev *dd,
+ u32 offset, u32 value)
+{
+ __raw_writel(value, dd->io_hash_base + offset);
+}
+
+/**
+ * s5p_set_dma_hashdata() - start DMA with sg
+ * @dev: device
+ * @sg: scatterlist ready to DMA transmit
+ */
+static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
+ struct scatterlist *sg)
+{
+ dev->hash_sg_cnt--;
+ SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
+ SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg)); /* DMA starts */
+}
+
+/**
+ * s5p_hash_rx() - get next hash_sg_iter
+ * @dev: device
+ *
+ * Return:
+ * 2 if there is no more data and it is UPDATE op
+ * 1 if new receiving (input) data is ready and can be written to device
+ * 0 if there is no more data and it is FINAL op
+ */
+static int s5p_hash_rx(struct s5p_aes_dev *dev)
+{
+ if (dev->hash_sg_cnt > 0) {
+ dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
+ return 1;
+ }
+
+ set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
+ if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
+ return 0;
+
+ return 2;
+}
+
static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
int err_dma_tx = 0;
int err_dma_rx = 0;
+ int err_dma_hx = 0;
bool tx_end = false;
+ bool hx_end = false;
unsigned long flags;
uint32_t status;
+ u32 st_bits;
int err;
spin_lock_irqsave(&dev->lock, flags);
@@ -456,6 +672,8 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
*
* If there is no more data in tx scatter list, call s5p_aes_complete()
* and schedule new tasklet.
+ *
+ * Handle the hx interrupt. If there is still data, map the next entry.
*/
status = SSS_READ(dev, FCINTSTAT);
if (status & SSS_FCINTSTAT_BRDMAINT)
@@ -467,7 +685,29 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
err_dma_tx = s5p_aes_tx(dev);
}
- SSS_WRITE(dev, FCINTPEND, status);
+ if (status & SSS_FCINTSTAT_HRDMAINT)
+ err_dma_hx = s5p_hash_rx(dev);
+
+ st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT |
+ SSS_FCINTSTAT_HRDMAINT);
+ /* clear DMA bits */
+ SSS_WRITE(dev, FCINTPEND, st_bits);
+
+ /* clear HASH irq bits */
+ if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) {
+ /* cannot have both HPART and HDONE */
+ if (status & SSS_FCINTSTAT_HPARTINT)
+ st_bits = SSS_HASH_STATUS_PARTIAL_DONE;
+
+ if (status & SSS_FCINTSTAT_HDONEINT)
+ st_bits = SSS_HASH_STATUS_MSG_DONE;
+
+ set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
+ s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);
+ hx_end = true;
+ /* when DONE or PART, do not handle HASH DMA */
+ err_dma_hx = 0;
+ }
if (err_dma_rx < 0) {
err = err_dma_rx;
@@ -480,6 +720,8 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
if (tx_end) {
s5p_sg_done(dev);
+ if (err_dma_hx == 1)
+ s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
spin_unlock_irqrestore(&dev->lock, flags);
@@ -497,21 +739,1100 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
s5p_set_dma_outdata(dev, dev->sg_dst);
if (err_dma_rx == 1)
s5p_set_dma_indata(dev, dev->sg_src);
+ if (err_dma_hx == 1)
+ s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
spin_unlock_irqrestore(&dev->lock, flags);
}
- return IRQ_HANDLED;
+ goto hash_irq_end;
error:
s5p_sg_done(dev);
dev->busy = false;
+ if (err_dma_hx == 1)
+ s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
+
spin_unlock_irqrestore(&dev->lock, flags);
s5p_aes_complete(dev, err);
+hash_irq_end:
+ /*
+ * Note about else if:
+ * when hash_sg_iter reaches end and its UPDATE op,
+ * issue SSS_HASH_PAUSE and wait for HPART irq
+ */
+ if (hx_end)
+ tasklet_schedule(&dev->hash_tasklet);
+ else if (err_dma_hx == 2)
+ s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
+ SSS_HASH_PAUSE);
+
return IRQ_HANDLED;
}
+/**
+ * s5p_hash_read_msg() - read message or IV from HW
+ * @req: AHASH request
+ */
+static void s5p_hash_read_msg(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ struct s5p_aes_dev *dd = ctx->dd;
+ u32 *hash = (u32 *)ctx->digest;
+ unsigned int i;
+
+ for (i = 0; i < ctx->nregs; i++)
+ hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
+}
+
+/**
+ * s5p_hash_write_ctx_iv() - write IV for next partial/finup op.
+ * @dd: device
+ * @ctx: request context
+ */
+static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
+ struct s5p_hash_reqctx *ctx)
+{
+ u32 *hash = (u32 *)ctx->digest;
+ unsigned int i;
+
+ for (i = 0; i < ctx->nregs; i++)
+ s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
+}
+
+/**
+ * s5p_hash_write_iv() - write IV for next partial/finup op.
+ * @req: AHASH request
+ */
+static void s5p_hash_write_iv(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+ s5p_hash_write_ctx_iv(ctx->dd, ctx);
+}
+
+/**
+ * s5p_hash_copy_result() - copy digest into req->result
+ * @req: AHASH request
+ */
+static void s5p_hash_copy_result(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+ if (!req->result)
+ return;
+
+ memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
+}
+
+/**
+ * s5p_hash_dma_flush() - flush HASH DMA
+ * @dev: secss device
+ */
+static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
+{
+ SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
+}
+
+/**
+ * s5p_hash_dma_enable() - enable DMA mode for HASH
+ * @dev: secss device
+ *
+ * enable DMA mode for HASH
+ */
+static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
+{
+ s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
+}
+
+/**
+ * s5p_hash_irq_disable() - disable irq HASH signals
+ * @dev: secss device
+ * @flags: bitfield with irq's to be disabled
+ */
+static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
+{
+ SSS_WRITE(dev, FCINTENCLR, flags);
+}
+
+/**
+ * s5p_hash_irq_enable() - enable irq signals
+ * @dev: secss device
+ * @flags: bitfield with irq's to be enabled
+ */
+static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
+{
+ SSS_WRITE(dev, FCINTENSET, flags);
+}
+
+/**
+ * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH
+ * @dev: secss device
+ * @hashflow: HASH stream flow with/without crypto AES/DES
+ */
+static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
+{
+ unsigned long flags;
+ u32 flow;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ flow = SSS_READ(dev, FCFIFOCTRL);
+ flow &= ~SSS_HASHIN_MASK;
+ flow |= hashflow;
+ SSS_WRITE(dev, FCFIFOCTRL, flow);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/**
+ * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS
+ * @dev: secss device
+ * @hashflow: HASH stream flow with/without AES/DES
+ *
+ * flush HASH DMA and enable DMA, set HASH stream flow inside SecSS HW,
+ * enable HASH irq's HRDMA, HDONE, HPART
+ */
+static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
+{
+ s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
+ SSS_FCINTENCLR_HDONEINTENCLR |
+ SSS_FCINTENCLR_HPARTINTENCLR);
+ s5p_hash_dma_flush(dev);
+
+ s5p_hash_dma_enable(dev);
+ s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
+ s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
+ SSS_FCINTENSET_HDONEINTENSET |
+ SSS_FCINTENSET_HPARTINTENSET);
+}
+
+/**
+ * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing
+ * @dd: secss device
+ * @length: length for request
+ * @final: true if final op
+ *
+ * Prepare SSS HASH block for processing bytes in DMA mode. If it is called
+ * after previous updates, fill up IV words. For final, calculate and set
+ * lengths for HASH so SecSS can finalize the hash. For partial, set the SSS
+ * HASH length to 2^63 so it will never be reached, and set prelow and
+ * prehigh to zero.
+ *
+ * This function does not start DMA transfer.
+ */
+static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
+ bool final)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
+ u32 prelow, prehigh, low, high;
+ u32 configflags, swapflags;
+ u64 tmplen;
+
+ configflags = ctx->engine | SSS_HASH_INIT_BIT;
+
+ if (likely(ctx->digcnt)) {
+ s5p_hash_write_ctx_iv(dd, ctx);
+ configflags |= SSS_HASH_USER_IV_EN;
+ }
+
+ if (final) {
+ /* number of bytes for last part */
+ low = length;
+ high = 0;
+ /* total number of bits prev hashed */
+ tmplen = ctx->digcnt * 8;
+ prelow = (u32)tmplen;
+ prehigh = (u32)(tmplen >> 32);
+ } else {
+ prelow = 0;
+ prehigh = 0;
+ low = 0;
+ high = BIT(31);
+ }
+
+ swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO |
+ SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY;
+
+ s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
+ s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
+ s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
+ s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);
+
+ s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
+ s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
+}
+
+/**
+ * s5p_hash_xmit_dma() - start DMA hash processing
+ * @dd: secss device
+ * @length: length for request
+ * @final: true if final op
+ *
+ * Update digcnt here, as it is needed for finup/final op.
+ */
+static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
+ bool final)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
+ unsigned int cnt;
+
+ cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
+ if (!cnt) {
+ dev_err(dd->dev, "dma_map_sg error\n");
+ ctx->error = true;
+ return -EINVAL;
+ }
+
+ set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
+ dd->hash_sg_iter = ctx->sg;
+ dd->hash_sg_cnt = cnt;
+ s5p_hash_write_ctrl(dd, length, final);
+ ctx->digcnt += length;
+ ctx->total -= length;
+
+ /* catch last interrupt */
+ if (final)
+ set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);
+
+ s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */
+
+ return -EINPROGRESS;
+}
+
+/**
+ * s5p_hash_copy_sgs() - copy request's bytes into new buffer
+ * @ctx: request context
+ * @sg: source scatterlist request
+ * @new_len: number of bytes to process from sg
+ *
+ * Allocate a new buffer and copy the data for HASH into it. If xmit_buf was
+ * filled, copy it first, then copy the data from sg after it. Prepare one
+ * sgl[0] entry with the allocated buffer.
+ *
+ * Set a bit in dd->hash_flags so we can free it after the irq ends processing.
+ */
+static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
+ struct scatterlist *sg, unsigned int new_len)
+{
+ unsigned int pages, len;
+ void *buf;
+
+ len = new_len + ctx->bufcnt;
+ pages = get_order(len);
+
+ buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
+ if (!buf) {
+ dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
+ ctx->error = true;
+ return -ENOMEM;
+ }
+
+ if (ctx->bufcnt)
+ memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
+
+ scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
+ new_len, 0);
+ sg_init_table(ctx->sgl, 1);
+ sg_set_buf(ctx->sgl, buf, len);
+ ctx->sg = ctx->sgl;
+ ctx->sg_len = 1;
+ ctx->bufcnt = 0;
+ ctx->skip = 0;
+ set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);
+
+ return 0;
+}
+
+/**
+ * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy
+ * @ctx: request context
+ * @sg: source scatterlist request
+ * @new_len: number of bytes to process from sg
+ *
+ * Allocate a new scatterlist table and copy the data for HASH into it. If
+ * xmit_buf was filled, prepare it first, then copy page, length and offset
+ * from the source sg into it, adjusting begin and/or end for the skip offset
+ * and the hash_later value.
+ *
+ * The resulting sg table will be assigned to ctx->sg. Set a flag so we can
+ * free it after the irq ends processing.
+ */
+static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
+ struct scatterlist *sg, unsigned int new_len)
+{
+ unsigned int skip = ctx->skip, n = sg_nents(sg);
+ struct scatterlist *tmp;
+ unsigned int len;
+
+ if (ctx->bufcnt)
+ n++;
+
+ ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
+ if (!ctx->sg) {
+ ctx->error = true;
+ return -ENOMEM;
+ }
+
+ sg_init_table(ctx->sg, n);
+
+ tmp = ctx->sg;
+
+ ctx->sg_len = 0;
+
+ if (ctx->bufcnt) {
+ sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
+ tmp = sg_next(tmp);
+ ctx->sg_len++;
+ }
+
+ while (sg && skip >= sg->length) {
+ skip -= sg->length;
+ sg = sg_next(sg);
+ }
+
+ while (sg && new_len) {
+ len = sg->length - skip;
+ if (new_len < len)
+ len = new_len;
+
+ new_len -= len;
+ sg_set_page(tmp, sg_page(sg), len, sg->offset + skip);
+ skip = 0;
+ if (new_len <= 0)
+ sg_mark_end(tmp);
+
+ tmp = sg_next(tmp);
+ ctx->sg_len++;
+ sg = sg_next(sg);
+ }
+
+ set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);
+
+ return 0;
+}
+
+/**
+ * s5p_hash_prepare_sgs() - prepare sg for processing
+ * @ctx: request context
+ * @sg: source scatterlist request
+ * @nbytes: number of bytes to process from sg
+ * @final: final flag
+ *
+ * Check two conditions: (1) the buffers in sg hold length-aligned data, and
+ * (2) the sg table has well-aligned entries (list_ok). If one of these checks
+ * fails, then either (1) allocate a new buffer for the data with
+ * s5p_hash_copy_sgs, copy the data into this buffer and prepare the request
+ * in sgl, or (2) allocate a new sg table and prepare its sg entries.
+ *
+ * For digest or finup all conditions can be good, and we may not need any
+ * fix-ups.
+ */
+static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
+ struct scatterlist *sg,
+ unsigned int new_len, bool final)
+{
+ unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
+ bool aligned = true, list_ok = true;
+ struct scatterlist *sg_tmp = sg;
+
+ if (!sg || !sg->length || !new_len)
+ return 0;
+
+ if (skip || !final)
+ list_ok = false;
+
+ while (nbytes > 0 && sg_tmp) {
+ n++;
+ if (skip >= sg_tmp->length) {
+ skip -= sg_tmp->length;
+ if (!sg_tmp->length) {
+ aligned = false;
+ break;
+ }
+ } else {
+ if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) {
+ aligned = false;
+ break;
+ }
+
+ if (nbytes < sg_tmp->length - skip) {
+ list_ok = false;
+ break;
+ }
+
+ nbytes -= sg_tmp->length - skip;
+ skip = 0;
+ }
+
+ sg_tmp = sg_next(sg_tmp);
+ }
+
+ if (!aligned)
+ return s5p_hash_copy_sgs(ctx, sg, new_len);
+ else if (!list_ok)
+ return s5p_hash_copy_sg_lists(ctx, sg, new_len);
+
+ /*
+ * Have aligned data from previous operation and/or current
+ * Note: will enter here only if (digest or finup) and aligned
+ */
+ if (ctx->bufcnt) {
+ ctx->sg_len = n;
+ sg_init_table(ctx->sgl, 2);
+ sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
+ sg_chain(ctx->sgl, 2, sg);
+ ctx->sg = ctx->sgl;
+ ctx->sg_len++;
+ } else {
+ ctx->sg = sg;
+ ctx->sg_len = n;
+ }
+
+ return 0;
+}
+
+/**
+ * s5p_hash_prepare_request() - prepare request for processing
+ * @req: AHASH request
+ * @update: true if UPDATE op
+ *
+ * Note 1: we can have update flag _and_ final flag at the same time.
+ * Note 2: we enter here when digcnt > BUFLEN (=HASH_BLOCK_SIZE), or when
+ *	   either req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN, or
+ *	   when this is a final op
+ */
+static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ bool final = ctx->finup;
+ int xmit_len, hash_later, nbytes;
+ int ret;
+
+ if (!req)
+ return 0;
+
+ if (update)
+ nbytes = req->nbytes;
+ else
+ nbytes = 0;
+
+ ctx->total = nbytes + ctx->bufcnt;
+ if (!ctx->total)
+ return 0;
+
+ if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
+ /* bytes left from previous request, so fill up to BUFLEN */
+ int len = BUFLEN - ctx->bufcnt % BUFLEN;
+
+ if (len > nbytes)
+ len = nbytes;
+
+ scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
+ 0, len, 0);
+ ctx->bufcnt += len;
+ nbytes -= len;
+ ctx->skip = len;
+ } else {
+ ctx->skip = 0;
+ }
+
+ if (ctx->bufcnt)
+ memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);
+
+ xmit_len = ctx->total;
+ if (final) {
+ hash_later = 0;
+ } else {
+ if (IS_ALIGNED(xmit_len, BUFLEN))
+ xmit_len -= BUFLEN;
+ else
+ xmit_len -= xmit_len & (BUFLEN - 1);
+
+ hash_later = ctx->total - xmit_len;
+ /* copy hash_later bytes from end of req->src */
+ /* previous bytes are in xmit_buf, so no overwrite */
+ scatterwalk_map_and_copy(ctx->buffer, req->src,
+ req->nbytes - hash_later,
+ hash_later, 0);
+ }
+
+ if (xmit_len > BUFLEN) {
+ ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
+ final);
+ if (ret)
+ return ret;
+ } else {
+ /* have buffered data only */
+ if (unlikely(!ctx->bufcnt)) {
+ /* first update didn't fill up buffer */
+ scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
+ 0, xmit_len, 0);
+ }
+
+ sg_init_table(ctx->sgl, 1);
+ sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);
+
+ ctx->sg = ctx->sgl;
+ ctx->sg_len = 1;
+ }
+
+ ctx->bufcnt = hash_later;
+ if (!final)
+ ctx->total = xmit_len;
+
+ return 0;
+}
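+
+/*
+ * Worked example (illustrative only, assumed values; BUFLEN = 64): with
+ * ctx->bufcnt = 10 bytes already buffered and a non-final update of
+ * req->nbytes = 200, the first 54 bytes of req->src top ctx->buffer up to 64
+ * bytes (skip = 54) and that full block is copied into xmit_buf. ctx->total
+ * = 210, so xmit_len is rounded down to 192 and hash_later = 18; the last 18
+ * bytes of req->src are stashed back into ctx->buffer for the next op.
+ * s5p_hash_prepare_sgs() then covers the remaining 128 source bytes, which
+ * together with the 64-byte xmit_buf gives exactly the 192 bytes sent over
+ * DMA.
+ */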
+
+/**
+ * s5p_hash_update_dma_stop() - unmap DMA
+ * @dd: secss device
+ *
+ * Unmap scatterlist ctx->sg.
+ */
+static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
+
+ dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
+ clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
+}
+
+/**
+ * s5p_hash_finish() - copy calculated digest to crypto layer
+ * @req: AHASH request
+ */
+static void s5p_hash_finish(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ struct s5p_aes_dev *dd = ctx->dd;
+
+ if (ctx->digcnt)
+ s5p_hash_copy_result(req);
+
+ dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
+}
+
+/**
+ * s5p_hash_finish_req() - finish request
+ * @req: AHASH request
+ * @err: error
+ */
+static void s5p_hash_finish_req(struct ahash_request *req, int err)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ struct s5p_aes_dev *dd = ctx->dd;
+ unsigned long flags;
+
+ if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags))
+ free_pages((unsigned long)sg_virt(ctx->sg),
+ get_order(ctx->sg->length));
+
+ if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags))
+ kfree(ctx->sg);
+
+ ctx->sg = NULL;
+ dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) |
+ BIT(HASH_FLAGS_SGS_COPIED));
+
+ if (!err && !ctx->error) {
+ s5p_hash_read_msg(req);
+ if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags))
+ s5p_hash_finish(req);
+ } else {
+ ctx->error = true;
+ }
+
+ spin_lock_irqsave(&dd->hash_lock, flags);
+ dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) |
+ BIT(HASH_FLAGS_DMA_READY) |
+ BIT(HASH_FLAGS_OUTPUT_READY));
+ spin_unlock_irqrestore(&dd->hash_lock, flags);
+
+ if (req->base.complete)
+ req->base.complete(&req->base, err);
+}
+
+/**
+ * s5p_hash_handle_queue() - handle hash queue
+ * @dd: device s5p_aes_dev
+ * @req: AHASH request
+ *
+ * If req!=NULL, enqueue it on dd->hash_queue; if HASH_FLAGS_BUSY is not set
+ * on the device then process the first request from the queue
+ *
+ * Returns: see s5p_hash_final below.
+ */
+static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
+ struct ahash_request *req)
+{
+ struct crypto_async_request *async_req, *backlog;
+ struct s5p_hash_reqctx *ctx;
+ unsigned long flags;
+ int err = 0, ret = 0;
+
+retry:
+ spin_lock_irqsave(&dd->hash_lock, flags);
+ if (req)
+ ret = ahash_enqueue_request(&dd->hash_queue, req);
+
+ if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
+ spin_unlock_irqrestore(&dd->hash_lock, flags);
+ return ret;
+ }
+
+ backlog = crypto_get_backlog(&dd->hash_queue);
+ async_req = crypto_dequeue_request(&dd->hash_queue);
+ if (async_req)
+ set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);
+
+ spin_unlock_irqrestore(&dd->hash_lock, flags);
+
+ if (!async_req)
+ return ret;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ahash_request_cast(async_req);
+ dd->hash_req = req;
+ ctx = ahash_request_ctx(req);
+
+ err = s5p_hash_prepare_request(req, ctx->op_update);
+ if (err || !ctx->total)
+ goto out;
+
+ dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
+ ctx->op_update, req->nbytes);
+
+ s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT);
+ if (ctx->digcnt)
+ s5p_hash_write_iv(req); /* restore hash IV */
+
+ if (ctx->op_update) { /* HASH_OP_UPDATE */
+ err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
+ if (err != -EINPROGRESS && ctx->finup && !ctx->error)
+ /* no final() after finup() */
+ err = s5p_hash_xmit_dma(dd, ctx->total, true);
+ } else { /* HASH_OP_FINAL */
+ err = s5p_hash_xmit_dma(dd, ctx->total, true);
+ }
+out:
+ if (err != -EINPROGRESS) {
+ /* hash_tasklet_cb will not finish it, so do it here */
+ s5p_hash_finish_req(req, err);
+ req = NULL;
+
+ /*
+ * Execute next request immediately if there is anything
+ * in queue.
+ */
+ goto retry;
+ }
+
+ return ret;
+}
+
+/**
+ * s5p_hash_tasklet_cb() - hash tasklet
+ * @data: ptr to s5p_aes_dev
+ */
+static void s5p_hash_tasklet_cb(unsigned long data)
+{
+ struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data;
+
+ if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
+ s5p_hash_handle_queue(dd, NULL);
+ return;
+ }
+
+ if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) {
+ if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE,
+ &dd->hash_flags)) {
+ s5p_hash_update_dma_stop(dd);
+ }
+
+ if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY,
+ &dd->hash_flags)) {
+ /* hash or semi-hash ready */
+ clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
+ goto finish;
+ }
+ }
+
+ return;
+
+finish:
+	/* finish current request */
+ s5p_hash_finish_req(dd->hash_req, 0);
+
+ /* If we are not busy, process next req */
+ if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags))
+ s5p_hash_handle_queue(dd, NULL);
+}
+
+/**
+ * s5p_hash_enqueue() - enqueue request
+ * @req: AHASH request
+ * @op: operation UPDATE (true) or FINAL (false)
+ *
+ * Returns: see s5p_hash_final below.
+ */
+static int s5p_hash_enqueue(struct ahash_request *req, bool op)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->op_update = op;
+
+ return s5p_hash_handle_queue(tctx->dd, req);
+}
+
+/**
+ * s5p_hash_update() - process the hash input data
+ * @req: AHASH request
+ *
+ * If the request fits in the buffer, copy it and return immediately;
+ * otherwise enqueue it with OP_UPDATE.
+ *
+ * Returns: see s5p_hash_final below.
+ */
+static int s5p_hash_update(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+ if (!req->nbytes)
+ return 0;
+
+ if (ctx->bufcnt + req->nbytes <= BUFLEN) {
+ scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
+ 0, req->nbytes, 0);
+ ctx->bufcnt += req->nbytes;
+ return 0;
+ }
+
+ return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */
+}
+
+/**
+ * s5p_hash_shash_digest() - calculate shash digest
+ * @tfm: crypto transformation
+ * @flags: tfm flags
+ * @data: input data
+ * @len: length of data
+ * @out: output buffer
+ */
+static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ SHASH_DESC_ON_STACK(shash, tfm);
+
+ shash->tfm = tfm;
+ shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_shash_digest(shash, data, len, out);
+}
+
+/**
+ * s5p_hash_final_shash() - calculate shash digest
+ * @req: AHASH request
+ */
+static int s5p_hash_final_shash(struct ahash_request *req)
+{
+ struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+ return s5p_hash_shash_digest(tctx->fallback, req->base.flags,
+ ctx->buffer, ctx->bufcnt, req->result);
+}
+
+/**
+ * s5p_hash_final() - close up hash and calculate digest
+ * @req: AHASH request
+ *
+ * Note: in final, req->src does not contain any data, yet req->nbytes can
+ * be non-zero.
+ *
+ * If no input data has been processed yet and the buffered hash data is
+ * less than BUFLEN (64) then calculate the final hash immediately by using
+ * the SW algorithm fallback.
+ *
+ * Otherwise enqueue the current AHASH request with the OP_FINAL operation
+ * and finalize the hash message in HW. Note that if digcnt!=0 then there
+ * was a previous update op, so there are always some buffered bytes in
+ * ctx->buffer, which means that ctx->bufcnt!=0
+ *
+ * Returns:
+ * 0 if the request has been processed immediately,
+ * -EINPROGRESS if the operation has been queued for later execution or is set
+ * to processing by HW,
+ * -EBUSY if queue is full and request should be resubmitted later,
+ * other negative values denote an error.
+ */
+static int s5p_hash_final(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+ ctx->finup = true;
+ if (ctx->error)
+ return -EINVAL; /* uncompleted hash is not needed */
+
+ if (!ctx->digcnt && ctx->bufcnt < BUFLEN)
+ return s5p_hash_final_shash(req);
+
+ return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
+}
+
+/**
+ * s5p_hash_finup() - process last req->src and calculate digest
+ * @req: AHASH request containing the last update data
+ *
+ * Return values: see s5p_hash_final above.
+ */
+static int s5p_hash_finup(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ int err1, err2;
+
+ ctx->finup = true;
+
+ err1 = s5p_hash_update(req);
+ if (err1 == -EINPROGRESS || err1 == -EBUSY)
+ return err1;
+
+ /*
+	 * final() always has to be called to clean up resources, even if
+	 * update() failed, except when update() returned -EINPROGRESS or when
+	 * the digest was already calculated for a small input size
+ */
+ err2 = s5p_hash_final(req);
+
+ return err1 ?: err2;
+}
+
+/**
+ * s5p_hash_init() - initialize AHASH request context
+ * @req: AHASH request
+ *
+ * Init async hash request context.
+ */
+static int s5p_hash_init(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ ctx->dd = tctx->dd;
+ ctx->error = false;
+ ctx->finup = false;
+ ctx->bufcnt = 0;
+ ctx->digcnt = 0;
+ ctx->total = 0;
+ ctx->skip = 0;
+
+ dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
+ crypto_ahash_digestsize(tfm));
+
+ switch (crypto_ahash_digestsize(tfm)) {
+ case MD5_DIGEST_SIZE:
+ ctx->engine = SSS_HASH_ENGINE_MD5;
+ ctx->nregs = HASH_MD5_MAX_REG;
+ break;
+ case SHA1_DIGEST_SIZE:
+ ctx->engine = SSS_HASH_ENGINE_SHA1;
+ ctx->nregs = HASH_SHA1_MAX_REG;
+ break;
+ case SHA256_DIGEST_SIZE:
+ ctx->engine = SSS_HASH_ENGINE_SHA256;
+ ctx->nregs = HASH_SHA256_MAX_REG;
+ break;
+ default:
+ ctx->error = true;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * s5p_hash_digest - calculate digest from req->src
+ * @req: AHASH request
+ *
+ * Return values: see s5p_hash_final above.
+ */
+static int s5p_hash_digest(struct ahash_request *req)
+{
+ return s5p_hash_init(req) ?: s5p_hash_finup(req);
+}
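+
+/*
+ * Illustrative only, not part of the driver: the ops above are reached
+ * through the generic ahash API. A caller doing incremental hashing would
+ * go roughly through (request setup and error handling omitted):
+ *
+ *	crypto_ahash_init(req);                            -> s5p_hash_init()
+ *	ahash_request_set_crypt(req, sg, NULL, len);
+ *	crypto_wait_req(crypto_ahash_update(req), &wait);  -> s5p_hash_update()
+ *	ahash_request_set_crypt(req, NULL, digest, 0);
+ *	crypto_wait_req(crypto_ahash_final(req), &wait);   -> s5p_hash_final()
+ */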
+
+/**
+ * s5p_hash_cra_init_alg - init crypto alg transformation
+ * @tfm: crypto transformation
+ */
+static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm)
+{
+ struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+
+ tctx->dd = s5p_dev;
+ /* Allocate a fallback and abort if it failed. */
+ tctx->fallback = crypto_alloc_shash(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(tctx->fallback)) {
+ pr_err("fallback alloc fails for '%s'\n", alg_name);
+ return PTR_ERR(tctx->fallback);
+ }
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct s5p_hash_reqctx) + BUFLEN);
+
+ return 0;
+}
+
+/**
+ * s5p_hash_cra_init - init crypto tfm
+ * @tfm: crypto transformation
+ */
+static int s5p_hash_cra_init(struct crypto_tfm *tfm)
+{
+ return s5p_hash_cra_init_alg(tfm);
+}
+
+/**
+ * s5p_hash_cra_exit - exit crypto tfm
+ * @tfm: crypto transformation
+ *
+ * free allocated fallback
+ */
+static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_shash(tctx->fallback);
+ tctx->fallback = NULL;
+}
+
+/**
+ * s5p_hash_export - export hash state
+ * @req: AHASH request
+ * @out: buffer for exported state
+ */
+static int s5p_hash_export(struct ahash_request *req, void *out)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+ memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);
+
+ return 0;
+}
+
+/**
+ * s5p_hash_import - import hash state
+ * @req: AHASH request
+ * @in: buffer with state to be imported from
+ */
+static int s5p_hash_import(struct ahash_request *req, const void *in)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
+ const struct s5p_hash_reqctx *ctx_in = in;
+
+ memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
+ if (ctx_in->bufcnt > BUFLEN) {
+ ctx->error = true;
+ return -EINVAL;
+ }
+
+ ctx->dd = tctx->dd;
+ ctx->error = false;
+
+ return 0;
+}
+
+static struct ahash_alg algs_sha1_md5_sha256[] = {
+{
+ .init = s5p_hash_init,
+ .update = s5p_hash_update,
+ .final = s5p_hash_final,
+ .finup = s5p_hash_finup,
+ .digest = s5p_hash_digest,
+ .export = s5p_hash_export,
+ .import = s5p_hash_import,
+ .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "exynos-sha1",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = HASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s5p_hash_ctx),
+ .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
+ .cra_module = THIS_MODULE,
+ .cra_init = s5p_hash_cra_init,
+ .cra_exit = s5p_hash_cra_exit,
+ }
+},
+{
+ .init = s5p_hash_init,
+ .update = s5p_hash_update,
+ .final = s5p_hash_final,
+ .finup = s5p_hash_finup,
+ .digest = s5p_hash_digest,
+ .export = s5p_hash_export,
+ .import = s5p_hash_import,
+ .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
+ .halg.digestsize = MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "md5",
+ .cra_driver_name = "exynos-md5",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = HASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s5p_hash_ctx),
+ .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
+ .cra_module = THIS_MODULE,
+ .cra_init = s5p_hash_cra_init,
+ .cra_exit = s5p_hash_cra_exit,
+ }
+},
+{
+ .init = s5p_hash_init,
+ .update = s5p_hash_update,
+ .final = s5p_hash_final,
+ .finup = s5p_hash_finup,
+ .digest = s5p_hash_digest,
+ .export = s5p_hash_export,
+ .import = s5p_hash_import,
+ .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "exynos-sha256",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = HASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s5p_hash_ctx),
+ .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
+ .cra_module = THIS_MODULE,
+ .cra_init = s5p_hash_cra_init,
+ .cra_exit = s5p_hash_cra_exit,
+ }
+}
+
+};
+
static void s5p_set_aes(struct s5p_aes_dev *dev,
uint8_t *key, uint8_t *iv, unsigned int keylen)
{
@@ -601,15 +1922,21 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
uint32_t aes_control;
unsigned long flags;
int err;
+ u8 *iv;
aes_control = SSS_AES_KEY_CHANGE_MODE;
if (mode & FLAGS_AES_DECRYPT)
aes_control |= SSS_AES_MODE_DECRYPT;
- if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC)
+ if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
aes_control |= SSS_AES_CHAIN_MODE_CBC;
- else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
+ iv = req->info;
+ } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
aes_control |= SSS_AES_CHAIN_MODE_CTR;
+ iv = req->info;
+ } else {
+ iv = NULL; /* AES_ECB */
+ }
if (dev->ctx->keylen == AES_KEYSIZE_192)
aes_control |= SSS_AES_KEY_SIZE_192;
@@ -640,7 +1967,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
goto outdata_error;
SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
- s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
+ s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);
s5p_set_dma_indata(dev, dev->sg_src);
s5p_set_dma_outdata(dev, dev->sg_dst);
@@ -829,6 +2156,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
struct samsung_aes_variant *variant;
struct s5p_aes_dev *pdata;
struct resource *res;
+ unsigned int hash_i;
if (s5p_dev)
return -EEXIST;
@@ -837,12 +2165,34 @@ static int s5p_aes_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
+ variant = find_s5p_sss_version(pdev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pdata->ioaddr))
- return PTR_ERR(pdata->ioaddr);
- variant = find_s5p_sss_version(pdev);
+ /*
+	 * Note: HASH and PRNG use the same registers in secss, so avoid
+	 * overwriting each other. This will drop HASH when CONFIG_EXYNOS_RNG
+	 * is enabled in the config. We need a larger size for the HASH
+	 * registers in secss; the current resource describes only AES/DES
+ */
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) {
+ if (variant == &exynos_aes_data) {
+ res->end += 0x300;
+ pdata->use_hash = true;
+ }
+ }
+
+ pdata->res = res;
+ pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pdata->ioaddr)) {
+ if (!pdata->use_hash)
+ return PTR_ERR(pdata->ioaddr);
+ /* try AES without HASH */
+ res->end -= 0x300;
+ pdata->use_hash = false;
+ pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pdata->ioaddr))
+ return PTR_ERR(pdata->ioaddr);
+ }
pdata->clk = devm_clk_get(dev, "secss");
if (IS_ERR(pdata->clk)) {
@@ -857,8 +2207,10 @@ static int s5p_aes_probe(struct platform_device *pdev)
}
spin_lock_init(&pdata->lock);
+ spin_lock_init(&pdata->hash_lock);
pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
+ pdata->io_hash_base = pdata->ioaddr + variant->hash_offset;
pdata->irq_fc = platform_get_irq(pdev, 0);
if (pdata->irq_fc < 0) {
@@ -888,12 +2240,40 @@ static int s5p_aes_probe(struct platform_device *pdev)
goto err_algs;
}
+ if (pdata->use_hash) {
+ tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb,
+ (unsigned long)pdata);
+ crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH);
+
+ for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256);
+ hash_i++) {
+ struct ahash_alg *alg;
+
+ alg = &algs_sha1_md5_sha256[hash_i];
+ err = crypto_register_ahash(alg);
+ if (err) {
+ dev_err(dev, "can't register '%s': %d\n",
+ alg->halg.base.cra_driver_name, err);
+ goto err_hash;
+ }
+ }
+ }
+
dev_info(dev, "s5p-sss driver registered\n");
return 0;
+err_hash:
+ for (j = hash_i - 1; j >= 0; j--)
+ crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);
+
+ tasklet_kill(&pdata->hash_tasklet);
+ res->end -= 0x300;
+
err_algs:
- dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err);
+ if (i < ARRAY_SIZE(algs))
+ dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name,
+ err);
for (j = 0; j < i; j++)
crypto_unregister_alg(&algs[j]);
@@ -920,9 +2300,16 @@ static int s5p_aes_remove(struct platform_device *pdev)
crypto_unregister_alg(&algs[i]);
tasklet_kill(&pdata->tasklet);
+ if (pdata->use_hash) {
+ for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
+ crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);
- clk_disable_unprepare(pdata->clk);
+ pdata->res->end -= 0x300;
+ tasklet_kill(&pdata->hash_tasklet);
+ pdata->use_hash = false;
+ }
+ clk_disable_unprepare(pdata->clk);
s5p_dev = NULL;
return 0;
@@ -942,3 +2329,4 @@ module_platform_driver(s5p_aes_crypto);
MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");
+MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>");
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 602332e02729..63aa78c0b12b 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -1,4 +1,4 @@
-config CRC_DEV_STM32
+config CRYPTO_DEV_STM32_CRC
tristate "Support for STM32 crc accelerators"
depends on ARCH_STM32
select CRYPTO_HASH
@@ -6,7 +6,7 @@ config CRC_DEV_STM32
This enables support for the CRC32 hw accelerator which can be found
on STMicroelectronics STM32 SOC.
-config HASH_DEV_STM32
+config CRYPTO_DEV_STM32_HASH
tristate "Support for STM32 hash accelerators"
depends on ARCH_STM32
depends on HAS_DMA
@@ -18,3 +18,12 @@ config HASH_DEV_STM32
help
This enables support for the HASH hw accelerator which can be found
on STMicroelectronics STM32 SOC.
+
+config CRYPTO_DEV_STM32_CRYP
+ tristate "Support for STM32 cryp accelerators"
+ depends on ARCH_STM32
+ select CRYPTO_HASH
+ select CRYPTO_ENGINE
+ help
+ This enables support for the CRYP (AES/DES/TDES) hw accelerator which
+ can be found on STMicroelectronics STM32 SoCs.
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
index 73cd56cad0cc..53d1bb94b221 100644
--- a/drivers/crypto/stm32/Makefile
+++ b/drivers/crypto/stm32/Makefile
@@ -1,2 +1,3 @@
-obj-$(CONFIG_CRC_DEV_STM32) += stm32_crc32.o
-obj-$(CONFIG_HASH_DEV_STM32) += stm32-hash.o
\ No newline at end of file
+obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32_crc32.o
+obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o
+obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
new file mode 100644
index 000000000000..4a06a7a665ee
--- /dev/null
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -0,0 +1,1170 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author: Fabien Dessenne <fabien.dessenne@st.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/engine.h>
+#include <crypto/scatterwalk.h>
+
+#define DRIVER_NAME "stm32-cryp"
+
+/* Bit [0] encrypt / decrypt */
+#define FLG_ENCRYPT BIT(0)
+/* Bit [8..1] algo & operation mode */
+#define FLG_AES BIT(1)
+#define FLG_DES BIT(2)
+#define FLG_TDES BIT(3)
+#define FLG_ECB BIT(4)
+#define FLG_CBC BIT(5)
+#define FLG_CTR BIT(6)
+/* Mode mask = bits [15..0] */
+#define FLG_MODE_MASK GENMASK(15, 0)
+
+/* Registers */
+#define CRYP_CR 0x00000000
+#define CRYP_SR 0x00000004
+#define CRYP_DIN 0x00000008
+#define CRYP_DOUT 0x0000000C
+#define CRYP_DMACR 0x00000010
+#define CRYP_IMSCR 0x00000014
+#define CRYP_RISR 0x00000018
+#define CRYP_MISR 0x0000001C
+#define CRYP_K0LR 0x00000020
+#define CRYP_K0RR 0x00000024
+#define CRYP_K1LR 0x00000028
+#define CRYP_K1RR 0x0000002C
+#define CRYP_K2LR 0x00000030
+#define CRYP_K2RR 0x00000034
+#define CRYP_K3LR 0x00000038
+#define CRYP_K3RR 0x0000003C
+#define CRYP_IV0LR 0x00000040
+#define CRYP_IV0RR 0x00000044
+#define CRYP_IV1LR 0x00000048
+#define CRYP_IV1RR 0x0000004C
+
+/* Registers values */
+#define CR_DEC_NOT_ENC 0x00000004
+#define CR_TDES_ECB 0x00000000
+#define CR_TDES_CBC 0x00000008
+#define CR_DES_ECB 0x00000010
+#define CR_DES_CBC 0x00000018
+#define CR_AES_ECB 0x00000020
+#define CR_AES_CBC 0x00000028
+#define CR_AES_CTR 0x00000030
+#define CR_AES_KP 0x00000038
+#define CR_AES_UNKNOWN 0xFFFFFFFF
+#define CR_ALGO_MASK 0x00080038
+#define CR_DATA32 0x00000000
+#define CR_DATA16 0x00000040
+#define CR_DATA8 0x00000080
+#define CR_DATA1 0x000000C0
+#define CR_KEY128 0x00000000
+#define CR_KEY192 0x00000100
+#define CR_KEY256 0x00000200
+#define CR_FFLUSH 0x00004000
+#define CR_CRYPEN 0x00008000
+
+#define SR_BUSY 0x00000010
+#define SR_OFNE 0x00000004
+
+#define IMSCR_IN BIT(0)
+#define IMSCR_OUT BIT(1)
+
+#define MISR_IN BIT(0)
+#define MISR_OUT BIT(1)
+
+/* Misc */
+#define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32))
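+/* bytes already consumed within the current in/out scatterlist entry */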
+#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset)
+#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset)
+
+struct stm32_cryp_ctx {
+ struct stm32_cryp *cryp;
+ int keylen;
+ u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+ unsigned long flags;
+};
+
+struct stm32_cryp_reqctx {
+ unsigned long mode;
+};
+
+struct stm32_cryp {
+ struct list_head list;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+ unsigned long flags;
+ u32 irq_status;
+ struct stm32_cryp_ctx *ctx;
+
+ struct crypto_engine *engine;
+
+ struct mutex lock; /* protects req */
+ struct ablkcipher_request *req;
+
+ size_t hw_blocksize;
+
+ size_t total_in;
+ size_t total_in_save;
+ size_t total_out;
+ size_t total_out_save;
+
+ struct scatterlist *in_sg;
+ struct scatterlist *out_sg;
+ struct scatterlist *out_sg_save;
+
+ struct scatterlist in_sgl;
+ struct scatterlist out_sgl;
+ bool sgs_copied;
+
+ int in_sg_len;
+ int out_sg_len;
+
+ struct scatter_walk in_walk;
+ struct scatter_walk out_walk;
+
+ u32 last_ctr[4];
+};
+
+struct stm32_cryp_list {
+ struct list_head dev_list;
+ spinlock_t lock; /* protect dev_list */
+};
+
+static struct stm32_cryp_list cryp_list = {
+ .dev_list = LIST_HEAD_INIT(cryp_list.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(cryp_list.lock),
+};
+
+static inline bool is_aes(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_AES;
+}
+
+static inline bool is_des(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_DES;
+}
+
+static inline bool is_tdes(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_TDES;
+}
+
+static inline bool is_ecb(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_ECB;
+}
+
+static inline bool is_cbc(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_CBC;
+}
+
+static inline bool is_ctr(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_CTR;
+}
+
+static inline bool is_encrypt(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_ENCRYPT;
+}
+
+static inline bool is_decrypt(struct stm32_cryp *cryp)
+{
+ return !is_encrypt(cryp);
+}
+
+static inline u32 stm32_cryp_read(struct stm32_cryp *cryp, u32 ofst)
+{
+ return readl_relaxed(cryp->regs + ofst);
+}
+
+static inline void stm32_cryp_write(struct stm32_cryp *cryp, u32 ofst, u32 val)
+{
+ writel_relaxed(val, cryp->regs + ofst);
+}
+
+static inline int stm32_cryp_wait_busy(struct stm32_cryp *cryp)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->regs + CRYP_SR, status,
+ !(status & SR_BUSY), 10, 100000);
+}
+
+static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx)
+{
+ struct stm32_cryp *tmp, *cryp = NULL;
+
+ spin_lock_bh(&cryp_list.lock);
+ if (!ctx->cryp) {
+ list_for_each_entry(tmp, &cryp_list.dev_list, list) {
+ cryp = tmp;
+ break;
+ }
+ ctx->cryp = cryp;
+ } else {
+ cryp = ctx->cryp;
+ }
+
+ spin_unlock_bh(&cryp_list.lock);
+
+ return cryp;
+}
+
+static int stm32_cryp_check_aligned(struct scatterlist *sg, size_t total,
+ size_t align)
+{
+ int len = 0;
+
+ if (!total)
+ return 0;
+
+ if (!IS_ALIGNED(total, align))
+ return -EINVAL;
+
+ while (sg) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+ return -EINVAL;
+
+ if (!IS_ALIGNED(sg->length, align))
+ return -EINVAL;
+
+ len += sg->length;
+ sg = sg_next(sg);
+ }
+
+ if (len != total)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int stm32_cryp_check_io_aligned(struct stm32_cryp *cryp)
+{
+ int ret;
+
+ ret = stm32_cryp_check_aligned(cryp->in_sg, cryp->total_in,
+ cryp->hw_blocksize);
+ if (ret)
+ return ret;
+
+ ret = stm32_cryp_check_aligned(cryp->out_sg, cryp->total_out,
+ cryp->hw_blocksize);
+
+ return ret;
+}
+
+static void sg_copy_buf(void *buf, struct scatterlist *sg,
+ unsigned int start, unsigned int nbytes, int out)
+{
+ struct scatter_walk walk;
+
+ if (!nbytes)
+ return;
+
+ scatterwalk_start(&walk, sg);
+ scatterwalk_advance(&walk, start);
+ scatterwalk_copychunks(buf, &walk, nbytes, out);
+ scatterwalk_done(&walk, out, 0);
+}
+
+static int stm32_cryp_copy_sgs(struct stm32_cryp *cryp)
+{
+ void *buf_in, *buf_out;
+ int pages, total_in, total_out;
+
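+ /*
+ * If the request scatterlists are already block/word aligned the
+ * hardware can use them directly; otherwise bounce the data through
+ * temporary contiguous buffers.
+ */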
+ if (!stm32_cryp_check_io_aligned(cryp)) {
+ cryp->sgs_copied = 0;
+ return 0;
+ }
+
+ total_in = ALIGN(cryp->total_in, cryp->hw_blocksize);
+ pages = total_in ? get_order(total_in) : 1;
+ buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+ total_out = ALIGN(cryp->total_out, cryp->hw_blocksize);
+ pages = total_out ? get_order(total_out) : 1;
+ buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+ if (!buf_in || !buf_out) {
+ dev_err(cryp->dev, "Can't allocate pages when unaligned\n");
+ cryp->sgs_copied = 0;
+ return -EFAULT;
+ }
+
+ sg_copy_buf(buf_in, cryp->in_sg, 0, cryp->total_in, 0);
+
+ sg_init_one(&cryp->in_sgl, buf_in, total_in);
+ cryp->in_sg = &cryp->in_sgl;
+ cryp->in_sg_len = 1;
+
+ sg_init_one(&cryp->out_sgl, buf_out, total_out);
+ cryp->out_sg_save = cryp->out_sg;
+ cryp->out_sg = &cryp->out_sgl;
+ cryp->out_sg_len = 1;
+
+ cryp->sgs_copied = 1;
+
+ return 0;
+}
+
+static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv)
+{
+ if (!iv)
+ return;
+
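+ /* DES/TDES use a 64-bit IV (IV0 only); AES also programs IV1 */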
+ stm32_cryp_write(cryp, CRYP_IV0LR, cpu_to_be32(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV0RR, cpu_to_be32(*iv++));
+
+ if (is_aes(cryp)) {
+ stm32_cryp_write(cryp, CRYP_IV1LR, cpu_to_be32(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV1RR, cpu_to_be32(*iv++));
+ }
+}
+
+static void stm32_cryp_hw_write_key(struct stm32_cryp *c)
+{
+ unsigned int i;
+ int r_id;
+
+ if (is_des(c)) {
+ stm32_cryp_write(c, CRYP_K1LR, cpu_to_be32(c->ctx->key[0]));
+ stm32_cryp_write(c, CRYP_K1RR, cpu_to_be32(c->ctx->key[1]));
+ } else {
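+ /*
+ * AES/TDES: write the key from its last 32-bit word into CRYP_K3RR
+ * and walk down, so the first key word lands in the lowest used
+ * key register.
+ */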
+ r_id = CRYP_K3RR;
+ for (i = c->ctx->keylen / sizeof(u32); i > 0; i--, r_id -= 4)
+ stm32_cryp_write(c, r_id,
+ cpu_to_be32(c->ctx->key[i - 1]));
+ }
+}
+
+static u32 stm32_cryp_get_hw_mode(struct stm32_cryp *cryp)
+{
+ if (is_aes(cryp) && is_ecb(cryp))
+ return CR_AES_ECB;
+
+ if (is_aes(cryp) && is_cbc(cryp))
+ return CR_AES_CBC;
+
+ if (is_aes(cryp) && is_ctr(cryp))
+ return CR_AES_CTR;
+
+ if (is_des(cryp) && is_ecb(cryp))
+ return CR_DES_ECB;
+
+ if (is_des(cryp) && is_cbc(cryp))
+ return CR_DES_CBC;
+
+ if (is_tdes(cryp) && is_ecb(cryp))
+ return CR_TDES_ECB;
+
+ if (is_tdes(cryp) && is_cbc(cryp))
+ return CR_TDES_CBC;
+
+ dev_err(cryp->dev, "Unknown mode\n");
+ return CR_AES_UNKNOWN;
+}
+
+static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
+{
+ int ret;
+ u32 cfg, hw_mode;
+
+ /* Disable interrupt */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+
+ /* Set key */
+ stm32_cryp_hw_write_key(cryp);
+
+ /* Set configuration */
+ cfg = CR_DATA8 | CR_FFLUSH;
+
+ switch (cryp->ctx->keylen) {
+ case AES_KEYSIZE_128:
+ cfg |= CR_KEY128;
+ break;
+
+ case AES_KEYSIZE_192:
+ cfg |= CR_KEY192;
+ break;
+
+ default:
+ case AES_KEYSIZE_256:
+ cfg |= CR_KEY256;
+ break;
+ }
+
+ hw_mode = stm32_cryp_get_hw_mode(cryp);
+ if (hw_mode == CR_AES_UNKNOWN)
+ return -EINVAL;
+
+ /* AES ECB/CBC decrypt: run key preparation first */
+ if (is_decrypt(cryp) &&
+ ((hw_mode == CR_AES_ECB) || (hw_mode == CR_AES_CBC))) {
+ stm32_cryp_write(cryp, CRYP_CR, cfg | CR_AES_KP | CR_CRYPEN);
+
+ /* Wait for end of processing */
+ ret = stm32_cryp_wait_busy(cryp);
+ if (ret) {
+ dev_err(cryp->dev, "Timeout (key preparation)\n");
+ return ret;
+ }
+ }
+
+ cfg |= hw_mode;
+
+ if (is_decrypt(cryp))
+ cfg |= CR_DEC_NOT_ENC;
+
+ /* Apply config and flush (valid when CRYPEN = 0) */
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ switch (hw_mode) {
+ case CR_DES_CBC:
+ case CR_TDES_CBC:
+ case CR_AES_CBC:
+ case CR_AES_CTR:
+ stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->req->info);
+ break;
+
+ default:
+ break;
+ }
+
+ /* Enable now */
+ cfg |= CR_CRYPEN;
+
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ return 0;
+}
+
+static void stm32_cryp_finish_req(struct stm32_cryp *cryp)
+{
+ int err = 0;
+
+ if (cryp->sgs_copied) {
+ void *buf_in, *buf_out;
+ int pages, len;
+
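+ /*
+ * Unaligned request: copy the bounce output buffer back into the
+ * caller's destination scatterlist, then free both temporary buffers.
+ */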
+ buf_in = sg_virt(&cryp->in_sgl);
+ buf_out = sg_virt(&cryp->out_sgl);
+
+ sg_copy_buf(buf_out, cryp->out_sg_save, 0,
+ cryp->total_out_save, 1);
+
+ len = ALIGN(cryp->total_in_save, cryp->hw_blocksize);
+ pages = len ? get_order(len) : 1;
+ free_pages((unsigned long)buf_in, pages);
+
+ len = ALIGN(cryp->total_out_save, cryp->hw_blocksize);
+ pages = len ? get_order(len) : 1;
+ free_pages((unsigned long)buf_out, pages);
+ }
+
+ crypto_finalize_cipher_request(cryp->engine, cryp->req, err);
+ cryp->req = NULL;
+
+ memset(cryp->ctx->key, 0, cryp->ctx->keylen);
+
+ mutex_unlock(&cryp->lock);
+}
+
+static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
+{
+ /* Enable interrupt and let the IRQ handler do everything */
+ stm32_cryp_write(cryp, CRYP_IMSCR, IMSCR_IN | IMSCR_OUT);
+
+ return 0;
+}
+
+static int stm32_cryp_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct stm32_cryp_reqctx);
+
+ return 0;
+}
+
+static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+ struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct stm32_cryp_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct stm32_cryp *cryp = stm32_cryp_find_dev(ctx);
+
+ if (!cryp)
+ return -ENODEV;
+
+ rctx->mode = mode;
+
+ return crypto_transfer_cipher_request_to_engine(cryp->engine, req);
+}
+
+static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
+static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+ else
+ return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE)
+ return -EINVAL;
+ else
+ return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ if (keylen != (3 * DES_KEY_SIZE))
+ return -EINVAL;
+ else
+ return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_ECB);
+}
+
+static int stm32_cryp_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CBC);
+}
+
+static int stm32_cryp_aes_ctr_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_ctr_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CTR);
+}
+
+static int stm32_cryp_des_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_des_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_ECB);
+}
+
+static int stm32_cryp_des_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_des_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_CBC);
+}
+
+static int stm32_cryp_tdes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_tdes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB);
+}
+
+static int stm32_cryp_tdes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_tdes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC);
+}
+
+static int stm32_cryp_prepare_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct stm32_cryp_ctx *ctx;
+ struct stm32_cryp *cryp;
+ struct stm32_cryp_reqctx *rctx;
+ int ret;
+
+ if (!req)
+ return -EINVAL;
+
+ ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+
+ cryp = ctx->cryp;
+
+ if (!cryp)
+ return -ENODEV;
+
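+ /* held for the whole transfer, released in stm32_cryp_finish_req() */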
+ mutex_lock(&cryp->lock);
+
+ rctx = ablkcipher_request_ctx(req);
+ rctx->mode &= FLG_MODE_MASK;
+
+ ctx->cryp = cryp;
+
+ cryp->flags = (cryp->flags & ~FLG_MODE_MASK) | rctx->mode;
+ cryp->hw_blocksize = is_aes(cryp) ? AES_BLOCK_SIZE : DES_BLOCK_SIZE;
+ cryp->ctx = ctx;
+
+ cryp->req = req;
+ cryp->total_in = req->nbytes;
+ cryp->total_out = cryp->total_in;
+
+ cryp->total_in_save = cryp->total_in;
+ cryp->total_out_save = cryp->total_out;
+
+ cryp->in_sg = req->src;
+ cryp->out_sg = req->dst;
+ cryp->out_sg_save = cryp->out_sg;
+
+ cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in);
+ if (cryp->in_sg_len < 0) {
+ dev_err(cryp->dev, "Cannot get in_sg_len\n");
+ ret = cryp->in_sg_len;
+ goto out;
+ }
+
+ cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out);
+ if (cryp->out_sg_len < 0) {
+ dev_err(cryp->dev, "Cannot get out_sg_len\n");
+ ret = cryp->out_sg_len;
+ goto out;
+ }
+
+ ret = stm32_cryp_copy_sgs(cryp);
+ if (ret)
+ goto out;
+
+ scatterwalk_start(&cryp->in_walk, cryp->in_sg);
+ scatterwalk_start(&cryp->out_walk, cryp->out_sg);
+
+ ret = stm32_cryp_hw_init(cryp);
+out:
+ if (ret)
+ mutex_unlock(&cryp->lock);
+
+ return ret;
+}
+
+static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ return stm32_cryp_prepare_req(engine, req);
+}
+
+static int stm32_cryp_cipher_one_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct stm32_cryp *cryp = ctx->cryp;
+
+ if (!cryp)
+ return -ENODEV;
+
+ return stm32_cryp_cpu_start(cryp);
+}
+
+static u32 *stm32_cryp_next_out(struct stm32_cryp *cryp, u32 *dst,
+ unsigned int n)
+{
+ scatterwalk_advance(&cryp->out_walk, n);
+
+ if (unlikely(cryp->out_sg->length == _walked_out)) {
+ cryp->out_sg = sg_next(cryp->out_sg);
+ if (cryp->out_sg) {
+ scatterwalk_start(&cryp->out_walk, cryp->out_sg);
+ return (sg_virt(cryp->out_sg) + _walked_out);
+ }
+ }
+
+ return (u32 *)((u8 *)dst + n);
+}
+
+static u32 *stm32_cryp_next_in(struct stm32_cryp *cryp, u32 *src,
+ unsigned int n)
+{
+ scatterwalk_advance(&cryp->in_walk, n);
+
+ if (unlikely(cryp->in_sg->length == _walked_in)) {
+ cryp->in_sg = sg_next(cryp->in_sg);
+ if (cryp->in_sg) {
+ scatterwalk_start(&cryp->in_walk, cryp->in_sg);
+ return (sg_virt(cryp->in_sg) + _walked_in);
+ }
+ }
+
+ return (u32 *)((u8 *)src + n);
+}
+
+static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp)
+{
+ u32 cr;
+
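+ /*
+ * The hardware only increments the last 32-bit word of the counter:
+ * when it is about to wrap, propagate the carry to the upper words
+ * by hand and reload the full IV while CRYPEN is cleared.
+ */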
+ if (unlikely(cryp->last_ctr[3] == 0xFFFFFFFF)) {
+ cryp->last_ctr[3] = 0;
+ cryp->last_ctr[2]++;
+ if (!cryp->last_ctr[2]) {
+ cryp->last_ctr[1]++;
+ if (!cryp->last_ctr[1])
+ cryp->last_ctr[0]++;
+ }
+
+ cr = stm32_cryp_read(cryp, CRYP_CR);
+ stm32_cryp_write(cryp, CRYP_CR, cr & ~CR_CRYPEN);
+
+ stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->last_ctr);
+
+ stm32_cryp_write(cryp, CRYP_CR, cr);
+ }
+
+ cryp->last_ctr[0] = stm32_cryp_read(cryp, CRYP_IV0LR);
+ cryp->last_ctr[1] = stm32_cryp_read(cryp, CRYP_IV0RR);
+ cryp->last_ctr[2] = stm32_cryp_read(cryp, CRYP_IV1LR);
+ cryp->last_ctr[3] = stm32_cryp_read(cryp, CRYP_IV1RR);
+}
+
+static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
+{
+ unsigned int i, j;
+ u32 d32, *dst;
+ u8 *d8;
+
+ dst = sg_virt(cryp->out_sg) + _walked_out;
+
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
+ if (likely(cryp->total_out >= sizeof(u32))) {
+ /* Read a full u32 */
+ *dst = stm32_cryp_read(cryp, CRYP_DOUT);
+
+ dst = stm32_cryp_next_out(cryp, dst, sizeof(u32));
+ cryp->total_out -= sizeof(u32);
+ } else if (!cryp->total_out) {
+ /* Empty fifo out (data from input padding) */
+ d32 = stm32_cryp_read(cryp, CRYP_DOUT);
+ } else {
+ /* Read less than a u32 */
+ d32 = stm32_cryp_read(cryp, CRYP_DOUT);
+ d8 = (u8 *)&d32;
+
+ for (j = 0; j < cryp->total_out; j++) {
+ *((u8 *)dst) = *(d8++);
+ dst = stm32_cryp_next_out(cryp, dst, 1);
+ }
+ cryp->total_out = 0;
+ }
+ }
+
+ return !cryp->total_out || !cryp->total_in;
+}
+
+static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
+{
+ unsigned int i, j;
+ u32 *src;
+ u8 d8[4];
+
+ src = sg_virt(cryp->in_sg) + _walked_in;
+
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
+ if (likely(cryp->total_in >= sizeof(u32))) {
+ /* Write a full u32 */
+ stm32_cryp_write(cryp, CRYP_DIN, *src);
+
+ src = stm32_cryp_next_in(cryp, src, sizeof(u32));
+ cryp->total_in -= sizeof(u32);
+ } else if (!cryp->total_in) {
+ /* Write padding data */
+ stm32_cryp_write(cryp, CRYP_DIN, 0);
+ } else {
+ /* Write less than a u32 */
+ memset(d8, 0, sizeof(u32));
+ for (j = 0; j < cryp->total_in; j++) {
+ d8[j] = *((u8 *)src);
+ src = stm32_cryp_next_in(cryp, src, 1);
+ }
+
+ stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
+ cryp->total_in = 0;
+ }
+ }
+}
+
+static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
+{
+ if (unlikely(!cryp->total_in)) {
+ dev_warn(cryp->dev, "No more data to process\n");
+ return;
+ }
+
+ if (is_aes(cryp) && is_ctr(cryp))
+ stm32_cryp_check_ctr_counter(cryp);
+
+ stm32_cryp_irq_write_block(cryp);
+}
+
+static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
+{
+ struct stm32_cryp *cryp = arg;
+
+ if (cryp->irq_status & MISR_OUT)
+ /* Output FIFO IRQ: read data */
+ if (unlikely(stm32_cryp_irq_read_data(cryp))) {
+ /* All bytes processed, finish */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+ stm32_cryp_finish_req(cryp);
+ return IRQ_HANDLED;
+ }
+
+ if (cryp->irq_status & MISR_IN) {
+ /* Input FIFO IRQ: write data */
+ stm32_cryp_irq_write_data(cryp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stm32_cryp_irq(int irq, void *arg)
+{
+ struct stm32_cryp *cryp = arg;
+
+ cryp->irq_status = stm32_cryp_read(cryp, CRYP_MISR);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static struct crypto_alg crypto_algs[] = {
+{
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "stm32-ecb-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ecb_encrypt,
+ .decrypt = stm32_cryp_aes_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "stm32-cbc-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_cbc_encrypt,
+ .decrypt = stm32_cryp_aes_cbc_decrypt,
+ }
+},
+{
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "stm32-ctr-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ctr_encrypt,
+ .decrypt = stm32_cryp_aes_ctr_decrypt,
+ }
+},
+{
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "stm32-ecb-des",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_ecb_encrypt,
+ .decrypt = stm32_cryp_des_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "stm32-cbc-des",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_cbc_encrypt,
+ .decrypt = stm32_cryp_des_cbc_decrypt,
+ }
+},
+{
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "stm32-ecb-des3",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_ecb_encrypt,
+ .decrypt = stm32_cryp_tdes_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "stm32-cbc-des3",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_cbc_encrypt,
+ .decrypt = stm32_cryp_tdes_cbc_decrypt,
+ }
+},
+};
+
+static const struct of_device_id stm32_dt_ids[] = {
+ { .compatible = "st,stm32f756-cryp", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_dt_ids);
+
+static int stm32_cryp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_cryp *cryp;
+ struct resource *res;
+ struct reset_control *rst;
+ int irq, ret;
+
+ cryp = devm_kzalloc(dev, sizeof(*cryp), GFP_KERNEL);
+ if (!cryp)
+ return -ENOMEM;
+
+ cryp->dev = dev;
+
+ mutex_init(&cryp->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cryp->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cryp->regs))
+ return PTR_ERR(cryp->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Cannot get IRQ resource\n");
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, stm32_cryp_irq,
+ stm32_cryp_irq_thread, IRQF_ONESHOT,
+ dev_name(dev), cryp);
+ if (ret) {
+ dev_err(dev, "Cannot grab IRQ\n");
+ return ret;
+ }
+
+ cryp->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(cryp->clk)) {
+ dev_err(dev, "Could not get clock\n");
+ return PTR_ERR(cryp->clk);
+ }
+
+ ret = clk_prepare_enable(cryp->clk);
+ if (ret) {
+ dev_err(cryp->dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ rst = devm_reset_control_get(dev, NULL);
+ if (!IS_ERR(rst)) {
+ reset_control_assert(rst);
+ udelay(2);
+ reset_control_deassert(rst);
+ }
+
+ platform_set_drvdata(pdev, cryp);
+
+ spin_lock(&cryp_list.lock);
+ list_add(&cryp->list, &cryp_list.dev_list);
+ spin_unlock(&cryp_list.lock);
+
+ /* Initialize crypto engine */
+ cryp->engine = crypto_engine_alloc_init(dev, 1);
+ if (!cryp->engine) {
+ dev_err(dev, "Could not init crypto engine\n");
+ ret = -ENOMEM;
+ goto err_engine1;
+ }
+
+ cryp->engine->prepare_cipher_request = stm32_cryp_prepare_cipher_req;
+ cryp->engine->cipher_one_request = stm32_cryp_cipher_one_req;
+
+ ret = crypto_engine_start(cryp->engine);
+ if (ret) {
+ dev_err(dev, "Could not start crypto engine\n");
+ goto err_engine2;
+ }
+
+ ret = crypto_register_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+ if (ret) {
+ dev_err(dev, "Could not register algs\n");
+ goto err_algs;
+ }
+
+ dev_info(dev, "Initialized\n");
+
+ return 0;
+
+err_algs:
+err_engine2:
+ crypto_engine_exit(cryp->engine);
+err_engine1:
+ spin_lock(&cryp_list.lock);
+ list_del(&cryp->list);
+ spin_unlock(&cryp_list.lock);
+
+ clk_disable_unprepare(cryp->clk);
+
+ return ret;
+}
+
+static int stm32_cryp_remove(struct platform_device *pdev)
+{
+ struct stm32_cryp *cryp = platform_get_drvdata(pdev);
+
+ if (!cryp)
+ return -ENODEV;
+
+ crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+
+ crypto_engine_exit(cryp->engine);
+
+ spin_lock(&cryp_list.lock);
+ list_del(&cryp->list);
+ spin_unlock(&cryp_list.lock);
+
+ clk_disable_unprepare(cryp->clk);
+
+ return 0;
+}
+
+static struct platform_driver stm32_cryp_driver = {
+ .probe = stm32_cryp_probe,
+ .remove = stm32_cryp_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = stm32_dt_ids,
+ },
+};
+
+module_platform_driver(stm32_cryp_driver);
+
+MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
+MODULE_DESCRIPTION("STMicrolectronics STM32 CRYP hardware driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 4835dd4a9e50..4ca4a264a833 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -895,7 +895,6 @@ static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
static int stm32_hash_update(struct ahash_request *req)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
- int ret;
if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
return 0;
@@ -909,12 +908,7 @@ static int stm32_hash_update(struct ahash_request *req)
return 0;
}
- ret = stm32_hash_enqueue(req, HASH_OP_UPDATE);
-
- if (rctx->flags & HASH_FLAGS_FINUP)
- return ret;
-
- return 0;
+ return stm32_hash_enqueue(req, HASH_OP_UPDATE);
}
static int stm32_hash_final(struct ahash_request *req)
@@ -1070,7 +1064,6 @@ static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
{
struct stm32_hash_dev *hdev = dev_id;
- int err;
if (HASH_FLAGS_CPU & hdev->flags) {
if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
@@ -1087,8 +1080,8 @@ static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
return IRQ_HANDLED;
finish:
- /*Finish current request */
- stm32_hash_finish_req(hdev->req, err);
+ /* Finish current request */
+ stm32_hash_finish_req(hdev->req, 0);
return IRQ_HANDLED;
}
@@ -1411,11 +1404,10 @@ MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
struct device *dev)
{
- const struct of_device_id *match;
int err;
- match = of_match_device(stm32_hash_of_match, dev);
- if (!match) {
+ hdev->pdata = of_device_get_match_data(dev);
+ if (!hdev->pdata) {
dev_err(dev, "no compatible OF match\n");
return -EINVAL;
}
@@ -1423,8 +1415,6 @@ static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
err = of_property_read_u32(dev->of_node, "dma-maxburst",
&hdev->dma_maxburst);
- hdev->pdata = match->data;
-
return err;
}
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
index 090582baecfe..8f09b8430893 100644
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ b/drivers/crypto/stm32/stm32_crc32.c
@@ -208,6 +208,7 @@ static struct shash_alg algs[] = {
.cra_name = "crc32",
.cra_driver_name = DRIVER_NAME,
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(struct stm32_crc_ctx),
@@ -229,6 +230,7 @@ static struct shash_alg algs[] = {
.cra_name = "crc32c",
.cra_driver_name = DRIVER_NAME,
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(struct stm32_crc_ctx),
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
index 0d01d1624252..63d636424161 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
@@ -28,7 +28,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
ss = algt->ss;
- spin_lock(&ss->slock);
+ spin_lock_bh(&ss->slock);
writel(mode, ss->base + SS_CTL);
@@ -51,6 +51,6 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
}
writel(0, ss->base + SS_CTL);
- spin_unlock(&ss->slock);
- return dlen;
+ spin_unlock_bh(&ss->slock);
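+ /* an rng generate() callback returns 0 on success, not the byte count */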
+ return 0;
}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index dff88838dce7..6882fa2f8bad 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -56,29 +56,26 @@
#include "talitos.h"
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
- bool is_sec1)
+ unsigned int len, bool is_sec1)
{
ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
- if (!is_sec1)
+ if (is_sec1) {
+ ptr->len1 = cpu_to_be16(len);
+ } else {
+ ptr->len = cpu_to_be16(len);
ptr->eptr = upper_32_bits(dma_addr);
+ }
}
static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
struct talitos_ptr *src_ptr, bool is_sec1)
{
dst_ptr->ptr = src_ptr->ptr;
- if (!is_sec1)
- dst_ptr->eptr = src_ptr->eptr;
-}
-
-static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
- bool is_sec1)
-{
if (is_sec1) {
- ptr->res = 0;
- ptr->len1 = cpu_to_be16(len);
+ dst_ptr->len1 = src_ptr->len1;
} else {
- ptr->len = cpu_to_be16(len);
+ dst_ptr->len = src_ptr->len;
+ dst_ptr->eptr = src_ptr->eptr;
}
}
@@ -116,9 +113,7 @@ static void map_single_talitos_ptr(struct device *dev,
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
- to_talitos_ptr_len(ptr, len, is_sec1);
- to_talitos_ptr(ptr, dma_addr, is_sec1);
- to_talitos_ptr_ext_set(ptr, 0, is_sec1);
+ to_talitos_ptr(ptr, dma_addr, len, is_sec1);
}
/*
@@ -165,6 +160,10 @@ static int reset_channel(struct device *dev, int ch)
/* set 36-bit addressing, done writeback enable and done IRQ enable */
setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
+ /* enable chaining descriptors */
+ if (is_sec1)
+ setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
+ TALITOS_CCCR_LO_NE);
/* and ICCR writeback, if available */
if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
@@ -287,7 +286,6 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
/* map descriptor and save caller data */
if (is_sec1) {
desc->hdr1 = desc->hdr;
- desc->next_desc = 0;
request->dma_desc = dma_map_single(dev, &desc->hdr1,
TALITOS_DESC_SIZE,
DMA_BIDIRECTIONAL);
@@ -339,7 +337,12 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
/* descriptors with their done bits set don't get the error */
rmb();
- hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;
+ if (!is_sec1)
+ hdr = request->desc->hdr;
+ else if (request->desc->next_desc)
+ hdr = (request->desc + 1)->hdr1;
+ else
+ hdr = request->desc->hdr1;
if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
status = 0;
@@ -393,8 +396,6 @@ static void talitos1_done_##name(unsigned long data) \
\
if (ch_done_mask & 0x10000000) \
flush_channel(dev, 0, 0, 0); \
- if (priv->num_channels == 1) \
- goto out; \
if (ch_done_mask & 0x40000000) \
flush_channel(dev, 1, 0, 0); \
if (ch_done_mask & 0x00010000) \
@@ -402,7 +403,6 @@ static void talitos1_done_##name(unsigned long data) \
if (ch_done_mask & 0x00040000) \
flush_channel(dev, 3, 0, 0); \
\
-out: \
/* At this point, all completed channels have been processed */ \
/* Unmask done interrupts for channels completed later on. */ \
spin_lock_irqsave(&priv->reg_lock, flags); \
@@ -412,6 +412,7 @@ out: \
}
DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
+DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
#define DEF_TALITOS2_DONE(name, ch_done_mask) \
static void talitos2_done_##name(unsigned long data) \
@@ -422,8 +423,6 @@ static void talitos2_done_##name(unsigned long data) \
\
if (ch_done_mask & 1) \
flush_channel(dev, 0, 0, 0); \
- if (priv->num_channels == 1) \
- goto out; \
if (ch_done_mask & (1 << 2)) \
flush_channel(dev, 1, 0, 0); \
if (ch_done_mask & (1 << 4)) \
@@ -431,7 +430,6 @@ static void talitos2_done_##name(unsigned long data) \
if (ch_done_mask & (1 << 6)) \
flush_channel(dev, 3, 0, 0); \
\
-out: \
/* At this point, all completed channels have been processed */ \
/* Unmask done interrupts for channels completed later on. */ \
spin_lock_irqsave(&priv->reg_lock, flags); \
@@ -441,6 +439,7 @@ out: \
}
DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
+DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
@@ -464,7 +463,8 @@ static u32 current_desc_hdr(struct device *dev, int ch)
tail = priv->chan[ch].tail;
iter = tail;
- while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
+ while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
+ priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
iter = (iter + 1) & (priv->fifo_len - 1);
if (iter == tail) {
dev_err(dev, "couldn't locate current descriptor\n");
@@ -472,6 +472,9 @@ static u32 current_desc_hdr(struct device *dev, int ch)
}
}
+ if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
+ return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
+
return priv->chan[ch].fifo[iter].desc->hdr;
}
@@ -825,9 +828,12 @@ struct talitos_ctx {
__be32 desc_hdr_template;
u8 key[TALITOS_MAX_KEY_SIZE];
u8 iv[TALITOS_MAX_IV_LENGTH];
+ dma_addr_t dma_key;
unsigned int keylen;
unsigned int enckeylen;
unsigned int authkeylen;
+ dma_addr_t dma_buf;
+ dma_addr_t dma_hw_context;
};
#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
@@ -836,8 +842,8 @@ struct talitos_ctx {
struct talitos_ahash_req_ctx {
u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
unsigned int hw_context_size;
- u8 buf[HASH_MAX_BLOCK_SIZE];
- u8 bufnext[HASH_MAX_BLOCK_SIZE];
+ u8 buf[2][HASH_MAX_BLOCK_SIZE];
+ int buf_idx;
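+ /* buf[buf_idx] holds the partial block carried over to the next update */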
unsigned int swinit;
unsigned int first;
unsigned int last;
@@ -861,6 +867,7 @@ static int aead_setkey(struct crypto_aead *authenc,
const u8 *key, unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = ctx->dev;
struct crypto_authenc_keys keys;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
@@ -869,12 +876,17 @@ static int aead_setkey(struct crypto_aead *authenc,
if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
goto badkey;
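+ /* the key is premapped for DMA; drop any mapping left from a previous setkey */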
+ if (ctx->keylen)
+ dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
memcpy(ctx->key, keys.authkey, keys.authkeylen);
memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
ctx->keylen = keys.authkeylen + keys.enckeylen;
ctx->enckeylen = keys.enckeylen;
ctx->authkeylen = keys.authkeylen;
+ ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
+ DMA_TO_DEVICE);
return 0;
@@ -948,13 +960,13 @@ static void ipsec_esp_unmap(struct device *dev,
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
+ bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
+ struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
- if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)
+ if (is_ipsec_esp)
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
DMA_FROM_DEVICE);
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
+ unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
areq->assoclen);
@@ -963,7 +975,7 @@ static void ipsec_esp_unmap(struct device *dev,
dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
DMA_BIDIRECTIONAL);
- if (!(edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
+ if (!is_ipsec_esp) {
unsigned int dst_nents = edesc->dst_nents ? : 1;
sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
@@ -983,6 +995,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
struct aead_request *areq = context;
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
unsigned int authsize = crypto_aead_authsize(authenc);
+ unsigned int ivsize = crypto_aead_ivsize(authenc);
struct talitos_edesc *edesc;
struct scatterlist *sg;
void *icvdata;
@@ -1003,6 +1016,8 @@ static void ipsec_esp_encrypt_done(struct device *dev,
icvdata, authsize);
}
+ dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
+
kfree(edesc);
aead_request_complete(areq, err);
@@ -1097,8 +1112,7 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
len = cryptlen;
to_talitos_ptr(link_tbl_ptr + count,
- sg_dma_address(sg) + offset, 0);
- to_talitos_ptr_len(link_tbl_ptr + count, len, 0);
+ sg_dma_address(sg) + offset, len, 0);
to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
count++;
cryptlen -= len;
@@ -1116,7 +1130,7 @@ next:
return count;
}
-int talitos_sg_map(struct device *dev, struct scatterlist *src,
+static int talitos_sg_map(struct device *dev, struct scatterlist *src,
unsigned int len, struct talitos_edesc *edesc,
struct talitos_ptr *ptr,
int sg_count, unsigned int offset, int tbl_off)
@@ -1124,15 +1138,16 @@ int talitos_sg_map(struct device *dev, struct scatterlist *src,
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
- to_talitos_ptr_len(ptr, len, is_sec1);
- to_talitos_ptr_ext_set(ptr, 0, is_sec1);
-
+ if (!src) {
+ to_talitos_ptr(ptr, 0, 0, is_sec1);
+ return 1;
+ }
if (sg_count == 1) {
- to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1);
+ to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
return sg_count;
}
if (is_sec1) {
- to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1);
+ to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
return sg_count;
}
sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
@@ -1143,7 +1158,7 @@ int talitos_sg_map(struct device *dev, struct scatterlist *src,
return sg_count;
}
to_talitos_ptr(ptr, edesc->dma_link_tbl +
- tbl_off * sizeof(struct talitos_ptr), is_sec1);
+ tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
return sg_count;
@@ -1170,10 +1185,12 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
bool sync_needed = false;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
+ bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
+ struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
+ struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
/* hmac key */
- map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
- DMA_TO_DEVICE);
+ to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
sg_count = edesc->src_nents ?: 1;
if (is_sec1 && sg_count > 1)
@@ -1194,25 +1211,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
}
/* cipher iv */
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
- to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, is_sec1);
- to_talitos_ptr_len(&desc->ptr[2], ivsize, is_sec1);
- to_talitos_ptr_ext_set(&desc->ptr[2], 0, is_sec1);
- } else {
- to_talitos_ptr(&desc->ptr[3], edesc->iv_dma, is_sec1);
- to_talitos_ptr_len(&desc->ptr[3], ivsize, is_sec1);
- to_talitos_ptr_ext_set(&desc->ptr[3], 0, is_sec1);
- }
+ to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
/* cipher key */
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
- map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
- (char *)&ctx->key + ctx->authkeylen,
- DMA_TO_DEVICE);
- else
- map_single_talitos_ptr(dev, &desc->ptr[2], ctx->enckeylen,
- (char *)&ctx->key + ctx->authkeylen,
- DMA_TO_DEVICE);
+ to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
+ ctx->enckeylen, is_sec1);
/*
* cipher in
@@ -1220,24 +1223,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
* extent is bytes of HMAC postpended to ciphertext,
* typically 12 for ipsec
*/
- to_talitos_ptr_len(&desc->ptr[4], cryptlen, is_sec1);
- to_talitos_ptr_ext_set(&desc->ptr[4], 0, is_sec1);
-
sg_link_tbl_len = cryptlen;
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
+ if (is_ipsec_esp) {
to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);
- if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
+ if (desc->hdr & DESC_HDR_MODE1_MDEU_CICV)
sg_link_tbl_len += authsize;
}
- sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
- &desc->ptr[4], sg_count, areq->assoclen,
- tbl_off);
+ ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
+ &desc->ptr[4], sg_count, areq->assoclen, tbl_off);
- if (sg_count > 1) {
- tbl_off += sg_count;
+ if (ret > 1) {
+ tbl_off += ret;
sync_needed = true;
}
@@ -1248,47 +1247,59 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
}
- sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc,
- &desc->ptr[5], sg_count, areq->assoclen,
- tbl_off);
+ ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
+ sg_count, areq->assoclen, tbl_off);
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
+ if (is_ipsec_esp)
to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
- if (sg_count > 1) {
+ /* ICV data */
+ if (ret > 1) {
+ tbl_off += ret;
edesc->icv_ool = true;
sync_needed = true;
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
+ if (is_ipsec_esp) {
struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
int offset = (edesc->src_nents + edesc->dst_nents + 2) *
sizeof(struct talitos_ptr) + authsize;
/* Add an entry to the link table for ICV data */
- tbl_ptr += sg_count - 1;
- to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1);
- tbl_ptr++;
+ to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
is_sec1);
- to_talitos_ptr_len(tbl_ptr, authsize, is_sec1);
/* icv data follows link tables */
to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
- is_sec1);
+ authsize, is_sec1);
+ } else {
+ dma_addr_t addr = edesc->dma_link_tbl;
+
+ if (is_sec1)
+ addr += areq->assoclen + cryptlen;
+ else
+ addr += sizeof(struct talitos_ptr) * tbl_off;
+
+ to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
+ }
+ } else if (!is_ipsec_esp) {
+ ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
+ &desc->ptr[6], sg_count, areq->assoclen +
+ cryptlen,
+ tbl_off);
+ if (ret > 1) {
+ tbl_off += ret;
+ edesc->icv_ool = true;
+ sync_needed = true;
+ } else {
+ edesc->icv_ool = false;
}
} else {
edesc->icv_ool = false;
}
- /* ICV data */
- if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
- to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
- to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl +
- areq->assoclen + cryptlen, is_sec1);
- }
-
/* iv out */
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
+ if (is_ipsec_esp)
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
DMA_FROM_DEVICE);
@@ -1387,22 +1398,31 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
alloc_len += icv_stashing ? authsize : 0;
}
+ /* if it is an ahash, add space for a second desc next to the first one */
+ if (is_sec1 && !dst)
+ alloc_len += sizeof(struct talitos_desc);
+
edesc = kmalloc(alloc_len, GFP_DMA | flags);
if (!edesc) {
dev_err(dev, "could not allocate edescriptor\n");
err = ERR_PTR(-ENOMEM);
goto error_sg;
}
+ memset(&edesc->desc, 0, sizeof(edesc->desc));
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->iv_dma = iv_dma;
edesc->dma_len = dma_len;
- if (dma_len)
- edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
+ if (dma_len) {
+ void *addr = &edesc->link_tbl[0];
+
+ if (is_sec1 && !dst)
+ addr += sizeof(struct talitos_desc);
+ edesc->dma_link_tbl = dma_map_single(dev, addr,
edesc->dma_len,
DMA_BIDIRECTIONAL);
-
+ }
return edesc;
error_sg:
if (iv_dma)
@@ -1468,7 +1488,6 @@ static int aead_decrypt(struct aead_request *req)
DESC_HDR_MODE1_MDEU_CICV;
/* reset integrity check result bits */
- edesc->desc.hdr_lo = 0;
return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
}
@@ -1494,15 +1513,29 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct device *dev = ctx->dev;
+ u32 tmp[DES_EXPKEY_WORDS];
if (keylen > TALITOS_MAX_KEY_SIZE) {
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
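+ /* honour CRYPTO_TFM_REQ_WEAK_KEY by rejecting weak DES keys */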
+ if (unlikely(crypto_ablkcipher_get_flags(cipher) &
+ CRYPTO_TFM_REQ_WEAK_KEY) &&
+ !des_ekey(tmp, key)) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
+ return -EINVAL;
+ }
+
+ if (ctx->keylen)
+ dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
memcpy(&ctx->key, key, keylen);
ctx->keylen = keylen;
+ ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
+
return 0;
}
@@ -1513,7 +1546,6 @@ static void common_nonsnoop_unmap(struct device *dev,
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
if (edesc->dma_len)
@@ -1555,16 +1587,12 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
bool is_sec1 = has_ftr_sec1(priv);
/* first DWORD empty */
- desc->ptr[0] = zero_entry;
/* cipher iv */
- to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
- to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
- to_talitos_ptr_ext_set(&desc->ptr[1], 0, is_sec1);
+ to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
/* cipher key */
- map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
- (char *)&ctx->key, DMA_TO_DEVICE);
+ to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
sg_count = edesc->src_nents ?: 1;
if (is_sec1 && sg_count > 1)
@@ -1599,7 +1627,6 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
DMA_FROM_DEVICE);
/* last DWORD empty */
- desc->ptr[6] = zero_entry;
if (sync_needed)
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
@@ -1663,26 +1690,16 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- struct talitos_private *priv = dev_get_drvdata(dev);
- bool is_sec1 = has_ftr_sec1(priv);
-
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
- /* When using hashctx-in, must unmap it. */
- if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
- DMA_TO_DEVICE);
-
- if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
- DMA_TO_DEVICE);
-
if (edesc->dma_len)
dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
DMA_BIDIRECTIONAL);
+ if (edesc->desc.next_desc)
+ dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
+ TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
}
static void ahash_done(struct device *dev,
@@ -1696,7 +1713,7 @@ static void ahash_done(struct device *dev,
if (!req_ctx->last && req_ctx->to_hash_later) {
/* Position any partial block for next update/final/finup */
- memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
+ req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
req_ctx->nbuf = req_ctx->to_hash_later;
}
common_nonsnoop_hash_unmap(dev, edesc, areq);
@@ -1710,7 +1727,7 @@ static void ahash_done(struct device *dev,
* SEC1 doesn't like hashing of 0 sized message, so we do the padding
* ourself and submit a padded block
*/
-void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
+static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
struct talitos_edesc *edesc,
struct talitos_ptr *ptr)
{
@@ -1729,6 +1746,7 @@ void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
struct ahash_request *areq, unsigned int length,
+ unsigned int offset,
void (*callback) (struct device *dev,
struct talitos_desc *desc,
void *context, int error))
@@ -1745,44 +1763,48 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
int sg_count;
/* first DWORD empty */
- desc->ptr[0] = zero_entry;
/* hash context in */
if (!req_ctx->first || req_ctx->swinit) {
- map_single_talitos_ptr(dev, &desc->ptr[1],
- req_ctx->hw_context_size,
- (char *)req_ctx->hw_context,
- DMA_TO_DEVICE);
+ to_talitos_ptr(&desc->ptr[1], ctx->dma_hw_context,
+ req_ctx->hw_context_size, is_sec1);
req_ctx->swinit = 0;
- } else {
- desc->ptr[1] = zero_entry;
}
/* Indicate next op is not the first. */
req_ctx->first = 0;
/* HMAC key */
if (ctx->keylen)
- map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
- (char *)&ctx->key, DMA_TO_DEVICE);
- else
- desc->ptr[2] = zero_entry;
+ to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
+ is_sec1);
+
+ if (is_sec1 && req_ctx->nbuf)
+ length -= req_ctx->nbuf;
sg_count = edesc->src_nents ?: 1;
if (is_sec1 && sg_count > 1)
- sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
- else
+ sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
+ edesc->buf + sizeof(struct talitos_desc),
+ length, req_ctx->nbuf);
+ else if (length)
sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
DMA_TO_DEVICE);
/*
* data in
*/
- sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
- &desc->ptr[3], sg_count, 0, 0);
- if (sg_count > 1)
- sync_needed = true;
+ if (is_sec1 && req_ctx->nbuf) {
+ dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx *
+ HASH_MAX_BLOCK_SIZE;
+
+ to_talitos_ptr(&desc->ptr[3], dma_buf, req_ctx->nbuf, is_sec1);
+ } else {
+ sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
+ &desc->ptr[3], sg_count, offset, 0);
+ if (sg_count > 1)
+ sync_needed = true;
+ }
/* fifth DWORD empty */
- desc->ptr[4] = zero_entry;
/* hash/HMAC out -or- hash context out */
if (req_ctx->last)
@@ -1790,16 +1812,44 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
crypto_ahash_digestsize(tfm),
areq->result, DMA_FROM_DEVICE);
else
- map_single_talitos_ptr(dev, &desc->ptr[5],
- req_ctx->hw_context_size,
- req_ctx->hw_context, DMA_FROM_DEVICE);
+ to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context,
+ req_ctx->hw_context_size, is_sec1);
/* last DWORD empty */
- desc->ptr[6] = zero_entry;
if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
+ if (is_sec1 && req_ctx->nbuf && length) {
+ struct talitos_desc *desc2 = desc + 1;
+ dma_addr_t next_desc;
+
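+ /*
+ * SEC1 with buffered data pending: the first descriptor hashes the
+ * buffered block and a chained second descriptor (without the INIT
+ * flag) processes the remaining scatterlist data.
+ */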
+ memset(desc2, 0, sizeof(*desc2));
+ desc2->hdr = desc->hdr;
+ desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
+ desc2->hdr1 = desc2->hdr;
+ desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
+ desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
+ desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
+
+ to_talitos_ptr(&desc2->ptr[1], ctx->dma_hw_context,
+ req_ctx->hw_context_size, is_sec1);
+
+ copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
+ sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
+ &desc2->ptr[3], sg_count, offset, 0);
+ if (sg_count > 1)
+ sync_needed = true;
+ copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
+ if (req_ctx->last)
+ to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context,
+ req_ctx->hw_context_size, is_sec1);
+
+ next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
+ DMA_BIDIRECTIONAL);
+ desc->next_desc = cpu_to_be32(next_desc);
+ }
+
if (sync_needed)
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
@@ -1818,6 +1868,11 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct talitos_private *priv = dev_get_drvdata(ctx->dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+
+ if (is_sec1)
+ nbytes -= req_ctx->nbuf;
return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
nbytes, 0, 0, 0, areq->base.flags, false);
@@ -1826,17 +1881,35 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
static int ahash_init(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = ctx->dev;
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ unsigned int size;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
/* Initialize the context */
+ req_ctx->buf_idx = 0;
req_ctx->nbuf = 0;
req_ctx->first = 1; /* first indicates h/w must init its context */
req_ctx->swinit = 0; /* assume h/w init of context */
- req_ctx->hw_context_size =
- (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+ size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+ req_ctx->hw_context_size = size;
+ if (ctx->dma_hw_context)
+ dma_unmap_single(dev, ctx->dma_hw_context, size,
+ DMA_BIDIRECTIONAL);
+ ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size,
+ DMA_BIDIRECTIONAL);
+ if (ctx->dma_buf)
+ dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf),
+ DMA_TO_DEVICE);
+ if (is_sec1)
+ ctx->dma_buf = dma_map_single(dev, req_ctx->buf,
+ sizeof(req_ctx->buf),
+ DMA_TO_DEVICE);
return 0;
}
@@ -1847,6 +1920,9 @@ static int ahash_init(struct ahash_request *areq)
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = ctx->dev;
ahash_init(areq);
req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
@@ -1864,6 +1940,9 @@ static int ahash_init_sha224_swinit(struct ahash_request *areq)
req_ctx->hw_context[8] = 0;
req_ctx->hw_context[9] = 0;
+ dma_sync_single_for_device(dev, ctx->dma_hw_context,
+ req_ctx->hw_context_size, DMA_TO_DEVICE);
+
return 0;
}
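
A minimal sketch of the streaming-DMA rule this hunk relies on (helper name and parameters are hypothetical, not taken from the patch): once the CPU has written the SHA-224 initial values into hw_context, the mapping must be synced before the SEC engine may read it.

	#include <linux/dma-mapping.h>
	#include <linux/string.h>

	/* Hypothetical helper: publish CPU-side updates of a streaming
	 * mapping to the device before the hardware DMAs from it.
	 */
	static void publish_to_device(struct device *dev, dma_addr_t dma,
				      void *cpu_buf, size_t len)
	{
		memset(cpu_buf, 0, len);	/* CPU fills the buffer */
		dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);
		/* only now may the device read from "dma" */
	}
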
@@ -1879,6 +1958,11 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
unsigned int to_hash_later;
unsigned int nsg;
int nents;
+ struct device *dev = ctx->dev;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+ int offset = 0;
+ u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
/* Buffer up to one whole block */
@@ -1888,7 +1972,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
return nents;
}
sg_copy_to_buffer(areq->src, nents,
- req_ctx->buf + req_ctx->nbuf, nbytes);
+ ctx_buf + req_ctx->nbuf, nbytes);
req_ctx->nbuf += nbytes;
return 0;
}
@@ -1909,13 +1993,27 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
}
/* Chain in any previously buffered data */
- if (req_ctx->nbuf) {
+ if (!is_sec1 && req_ctx->nbuf) {
nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
sg_init_table(req_ctx->bufsl, nsg);
- sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
+ sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
if (nsg > 1)
sg_chain(req_ctx->bufsl, 2, areq->src);
req_ctx->psrc = req_ctx->bufsl;
+ } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
+ if (nbytes_to_hash > blocksize)
+ offset = blocksize - req_ctx->nbuf;
+ else
+ offset = nbytes_to_hash - req_ctx->nbuf;
+ nents = sg_nents_for_len(areq->src, offset);
+ if (nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return nents;
+ }
+ sg_copy_to_buffer(areq->src, nents,
+ ctx_buf + req_ctx->nbuf, offset);
+ req_ctx->nbuf += offset;
+ req_ctx->psrc = areq->src;
} else
req_ctx->psrc = areq->src;
@@ -1926,7 +2024,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
return nents;
}
sg_pcopy_to_buffer(areq->src, nents,
- req_ctx->bufnext,
+ req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
to_hash_later,
nbytes - to_hash_later);
}
@@ -1948,6 +2046,13 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
/* request SEC to INIT hash. */
if (req_ctx->first && !req_ctx->swinit)
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
+ if (is_sec1) {
+ dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx *
+ HASH_MAX_BLOCK_SIZE;
+
+ dma_sync_single_for_device(dev, dma_buf,
+ req_ctx->nbuf, DMA_TO_DEVICE);
+ }
/* When the tfm context has a keylen, it's an HMAC.
* A first or last (ie. not middle) descriptor must request HMAC.
@@ -1955,7 +2060,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
if (ctx->keylen && (req_ctx->first || req_ctx->last))
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
- return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
+ return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
ahash_done);
}
@@ -2001,10 +2106,15 @@ static int ahash_export(struct ahash_request *areq, void *out)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct talitos_export_state *export = out;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *dev = ctx->dev;
+ dma_sync_single_for_cpu(dev, ctx->dma_hw_context,
+ req_ctx->hw_context_size, DMA_FROM_DEVICE);
memcpy(export->hw_context, req_ctx->hw_context,
req_ctx->hw_context_size);
- memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
+ memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
export->swinit = req_ctx->swinit;
export->first = req_ctx->first;
export->last = req_ctx->last;
@@ -2019,15 +2129,32 @@ static int ahash_import(struct ahash_request *areq, const void *in)
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
const struct talitos_export_state *export = in;
+ unsigned int size;
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = ctx->dev;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
memset(req_ctx, 0, sizeof(*req_ctx));
- req_ctx->hw_context_size =
- (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+ size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
- memcpy(req_ctx->hw_context, export->hw_context,
- req_ctx->hw_context_size);
- memcpy(req_ctx->buf, export->buf, export->nbuf);
+ req_ctx->hw_context_size = size;
+ if (ctx->dma_hw_context)
+ dma_unmap_single(dev, ctx->dma_hw_context, size,
+ DMA_BIDIRECTIONAL);
+
+ memcpy(req_ctx->hw_context, export->hw_context, size);
+ ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size,
+ DMA_BIDIRECTIONAL);
+ if (ctx->dma_buf)
+ dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf),
+ DMA_TO_DEVICE);
+ memcpy(req_ctx->buf[0], export->buf, export->nbuf);
+ if (is_sec1)
+ ctx->dma_buf = dma_map_single(dev, req_ctx->buf,
+ sizeof(req_ctx->buf),
+ DMA_TO_DEVICE);
req_ctx->swinit = export->swinit;
req_ctx->first = export->first;
req_ctx->last = export->last;
@@ -2037,22 +2164,6 @@ static int ahash_import(struct ahash_request *areq, const void *in)
return 0;
}
-struct keyhash_result {
- struct completion completion;
- int err;
-};
-
-static void keyhash_complete(struct crypto_async_request *req, int err)
-{
- struct keyhash_result *res = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- res->err = err;
- complete(&res->completion);
-}
-
static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
u8 *hash)
{
@@ -2060,10 +2171,10 @@ static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
struct scatterlist sg[1];
struct ahash_request *req;
- struct keyhash_result hresult;
+ struct crypto_wait wait;
int ret;
- init_completion(&hresult.completion);
+ crypto_init_wait(&wait);
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req)
@@ -2072,25 +2183,13 @@ static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
/* Keep tfm keylen == 0 during hash of the long key */
ctx->keylen = 0;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- keyhash_complete, &hresult);
+ crypto_req_done, &wait);
sg_init_one(&sg[0], key, keylen);
ahash_request_set_crypt(req, sg, hash, keylen);
- ret = crypto_ahash_digest(req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(
- &hresult.completion);
- if (!ret)
- ret = hresult.err;
- break;
- default:
- break;
- }
+ ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
+
ahash_request_free(req);
return ret;
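
For reference, a minimal sketch of the crypto_wait_req() pattern the hunk above switches to, assuming a caller that already owns an ahash tfm; function and variable names are illustrative only:

	#include <linux/crypto.h>
	#include <crypto/hash.h>

	static int hash_sync(struct crypto_ahash *tfm, struct scatterlist *sg,
			     unsigned int len, u8 *out)
	{
		struct crypto_wait wait;
		struct ahash_request *req;
		int err;

		crypto_init_wait(&wait);
		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   crypto_req_done, &wait);
		ahash_request_set_crypt(req, sg, out, len);

		/* sleeps on -EINPROGRESS/-EBUSY, returns the final status */
		err = crypto_wait_req(crypto_ahash_digest(req), &wait);

		ahash_request_free(req);
		return err;
	}
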
@@ -2100,6 +2199,7 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct device *dev = ctx->dev;
unsigned int blocksize =
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int digestsize = crypto_ahash_digestsize(tfm);
@@ -2122,7 +2222,11 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
memcpy(ctx->key, hash, digestsize);
}
+ if (ctx->keylen)
+ dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
ctx->keylen = keysize;
+ ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
return 0;
}
@@ -2614,7 +2718,7 @@ static struct talitos_alg_template driver_algs[] = {
.ivsize = AES_BLOCK_SIZE,
}
},
- .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CTR,
},
@@ -2951,6 +3055,36 @@ static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
return 0;
}
+static void talitos_cra_exit(struct crypto_tfm *tfm)
+{
+ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct device *dev = ctx->dev;
+
+ if (ctx->keylen)
+ dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+}
+
+static void talitos_cra_exit_ahash(struct crypto_tfm *tfm)
+{
+ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct device *dev = ctx->dev;
+ unsigned int size;
+
+ talitos_cra_exit(tfm);
+
+ size = (crypto_ahash_digestsize(__crypto_ahash_cast(tfm)) <=
+ SHA256_DIGEST_SIZE)
+ ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+ : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+
+ if (ctx->dma_hw_context)
+ dma_unmap_single(dev, ctx->dma_hw_context, size,
+ DMA_BIDIRECTIONAL);
+ if (ctx->dma_buf)
+ dma_unmap_single(dev, ctx->dma_buf, HASH_MAX_BLOCK_SIZE * 2,
+ DMA_TO_DEVICE);
+}
+
/*
* given the alg's descriptor header template, determine whether descriptor
* type and primary/secondary execution units required match the hw
@@ -2989,17 +3123,11 @@ static int talitos_remove(struct platform_device *ofdev)
break;
}
list_del(&t_alg->entry);
- kfree(t_alg);
}
if (hw_supports(dev, DESC_HDR_SEL0_RNG))
talitos_unregister_rng(dev);
- for (i = 0; priv->chan && i < priv->num_channels; i++)
- kfree(priv->chan[i].fifo);
-
- kfree(priv->chan);
-
for (i = 0; i < 2; i++)
if (priv->irq[i]) {
free_irq(priv->irq[i], dev);
@@ -3010,10 +3138,6 @@ static int talitos_remove(struct platform_device *ofdev)
if (priv->irq[1])
tasklet_kill(&priv->done_task[1]);
- iounmap(priv->reg);
-
- kfree(priv);
-
return 0;
}
@@ -3025,7 +3149,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
struct talitos_crypto_alg *t_alg;
struct crypto_alg *alg;
- t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
+ t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
+ GFP_KERNEL);
if (!t_alg)
return ERR_PTR(-ENOMEM);
@@ -3035,6 +3160,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
case CRYPTO_ALG_TYPE_ABLKCIPHER:
alg = &t_alg->algt.alg.crypto;
alg->cra_init = talitos_cra_init;
+ alg->cra_exit = talitos_cra_exit;
alg->cra_type = &crypto_ablkcipher_type;
alg->cra_ablkcipher.setkey = ablkcipher_setkey;
alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
@@ -3043,14 +3169,21 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
break;
case CRYPTO_ALG_TYPE_AEAD:
alg = &t_alg->algt.alg.aead.base;
+ alg->cra_exit = talitos_cra_exit;
t_alg->algt.alg.aead.init = talitos_cra_init_aead;
t_alg->algt.alg.aead.setkey = aead_setkey;
t_alg->algt.alg.aead.encrypt = aead_encrypt;
t_alg->algt.alg.aead.decrypt = aead_decrypt;
+ if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
+ !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
+ devm_kfree(dev, t_alg);
+ return ERR_PTR(-ENOTSUPP);
+ }
break;
case CRYPTO_ALG_TYPE_AHASH:
alg = &t_alg->algt.alg.hash.halg.base;
alg->cra_init = talitos_cra_init_ahash;
+ alg->cra_exit = talitos_cra_exit_ahash;
alg->cra_type = &crypto_ahash_type;
t_alg->algt.alg.hash.init = ahash_init;
t_alg->algt.alg.hash.update = ahash_update;
@@ -3064,7 +3197,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
!strncmp(alg->cra_name, "hmac", 4)) {
- kfree(t_alg);
+ devm_kfree(dev, t_alg);
return ERR_PTR(-ENOTSUPP);
}
if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
@@ -3079,7 +3212,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
break;
default:
dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
- kfree(t_alg);
+ devm_kfree(dev, t_alg);
return ERR_PTR(-EINVAL);
}
@@ -3156,11 +3289,11 @@ static int talitos_probe(struct platform_device *ofdev)
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
struct talitos_private *priv;
- const unsigned int *prop;
int i, err;
int stride;
+ struct resource *res;
- priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -3172,7 +3305,10 @@ static int talitos_probe(struct platform_device *ofdev)
spin_lock_init(&priv->reg_lock);
- priv->reg = of_iomap(np, 0);
+ res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
+ priv->reg = devm_ioremap(dev, res->start, resource_size(res));
if (!priv->reg) {
dev_err(dev, "failed to of_iomap\n");
err = -ENOMEM;
@@ -3180,21 +3316,11 @@ static int talitos_probe(struct platform_device *ofdev)
}
/* get SEC version capabilities from device tree */
- prop = of_get_property(np, "fsl,num-channels", NULL);
- if (prop)
- priv->num_channels = *prop;
-
- prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
- if (prop)
- priv->chfifo_len = *prop;
-
- prop = of_get_property(np, "fsl,exec-units-mask", NULL);
- if (prop)
- priv->exec_units = *prop;
-
- prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
- if (prop)
- priv->desc_types = *prop;
+ of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
+ of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
+ of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
+ of_property_read_u32(np, "fsl,descriptor-types-mask",
+ &priv->desc_types);
if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
!priv->exec_units || !priv->desc_types) {
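
A short sketch of why the conversion above keeps the sanity check working: of_property_read_u32() returns a negative errno and leaves the output variable untouched when the property is absent, so a missing property still shows up as zero. The helper and its use of "fsl,num-channels" are illustrative, not from the patch:

	#include <linux/of.h>

	/* Hypothetical excerpt: the later zero/is_power_of_2() checks still
	 * catch a missing property because "*val" keeps its initial 0.
	 */
	static void read_num_channels(struct device_node *np, u32 *val)
	{
		*val = 0;
		of_property_read_u32(np, "fsl,num-channels", val);
	}
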
@@ -3244,22 +3370,29 @@ static int talitos_probe(struct platform_device *ofdev)
goto err_out;
if (of_device_is_compatible(np, "fsl,sec1.0")) {
- tasklet_init(&priv->done_task[0], talitos1_done_4ch,
- (unsigned long)dev);
- } else {
- if (!priv->irq[1]) {
- tasklet_init(&priv->done_task[0], talitos2_done_4ch,
+ if (priv->num_channels == 1)
+ tasklet_init(&priv->done_task[0], talitos1_done_ch0,
(unsigned long)dev);
- } else {
+ else
+ tasklet_init(&priv->done_task[0], talitos1_done_4ch,
+ (unsigned long)dev);
+ } else {
+ if (priv->irq[1]) {
tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
(unsigned long)dev);
tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
(unsigned long)dev);
+ } else if (priv->num_channels == 1) {
+ tasklet_init(&priv->done_task[0], talitos2_done_ch0,
+ (unsigned long)dev);
+ } else {
+ tasklet_init(&priv->done_task[0], talitos2_done_4ch,
+ (unsigned long)dev);
}
}
- priv->chan = kzalloc(sizeof(struct talitos_channel) *
- priv->num_channels, GFP_KERNEL);
+ priv->chan = devm_kzalloc(dev, sizeof(struct talitos_channel) *
+ priv->num_channels, GFP_KERNEL);
if (!priv->chan) {
dev_err(dev, "failed to allocate channel management space\n");
err = -ENOMEM;
@@ -3276,8 +3409,9 @@ static int talitos_probe(struct platform_device *ofdev)
spin_lock_init(&priv->chan[i].head_lock);
spin_lock_init(&priv->chan[i].tail_lock);
- priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
- priv->fifo_len, GFP_KERNEL);
+ priv->chan[i].fifo = devm_kzalloc(dev,
+ sizeof(struct talitos_request) *
+ priv->fifo_len, GFP_KERNEL);
if (!priv->chan[i].fifo) {
dev_err(dev, "failed to allocate request fifo %d\n", i);
err = -ENOMEM;
@@ -3343,7 +3477,7 @@ static int talitos_probe(struct platform_device *ofdev)
if (err) {
dev_err(dev, "%s alg registration failed\n",
alg->cra_driver_name);
- kfree(t_alg);
+ devm_kfree(dev, t_alg);
} else
list_add_tail(&t_alg->entry, &priv->alg_list);
}
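
Taken together, the kzalloc()/kfree() to devm_kzalloc()/devm_kfree() and of_iomap() to devm_ioremap() conversions above move these resources to device-managed lifetime, which is what lets talitos_remove() drop its explicit kfree()/iounmap() calls. A minimal sketch of the pattern (structure and function names are hypothetical):

	#include <linux/device.h>
	#include <linux/slab.h>

	struct foo_priv {
		void *fifo;
	};

	static int foo_probe(struct device *dev)
	{
		struct foo_priv *priv;

		priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		/* no matching kfree() in remove(): the devres core frees
		 * the allocation when the device is unbound.
		 */
		return 0;
	}
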
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 8dd8f40e2771..a65a63e0d6c1 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -52,8 +52,6 @@ struct talitos_ptr {
__be32 ptr; /* address */
};
-static const struct talitos_ptr zero_entry;
-
/* descriptor */
struct talitos_desc {
__be32 hdr; /* header high bits */
@@ -210,9 +208,13 @@ static inline bool has_ftr_sec1(struct talitos_private *priv)
#define TALITOS_ISR 0x1010 /* interrupt status register */
#define TALITOS1_ISR_4CHERR ISR1_FORMAT(0xa) /* 4 ch errors mask */
#define TALITOS1_ISR_4CHDONE ISR1_FORMAT(0x5) /* 4 ch done mask */
+#define TALITOS1_ISR_CH_0_ERR (2 << 28) /* ch 0 errors mask */
+#define TALITOS1_ISR_CH_0_DONE (1 << 28) /* ch 0 done mask */
#define TALITOS1_ISR_TEA_ERR 0x00000040
#define TALITOS2_ISR_4CHERR ISR2_FORMAT(0xa) /* 4 ch errors mask */
#define TALITOS2_ISR_4CHDONE ISR2_FORMAT(0x5) /* 4 ch done mask */
+#define TALITOS2_ISR_CH_0_ERR 2 /* ch 0 errors mask */
+#define TALITOS2_ISR_CH_0_DONE 1 /* ch 0 done mask */
#define TALITOS2_ISR_CH_0_2_ERR ISR2_FORMAT(0x2) /* ch 0, 2 err mask */
#define TALITOS2_ISR_CH_0_2_DONE ISR2_FORMAT(0x1) /* ch 0, 2 done mask */
#define TALITOS2_ISR_CH_1_3_ERR ISR2_FORMAT(0x8) /* ch 1, 3 err mask */
@@ -234,6 +236,7 @@ static inline bool has_ftr_sec1(struct talitos_private *priv)
#define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */
#define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */
#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */
+#define TALITOS_CCCR_LO_NE 0x8 /* fetch next descriptor enab. */
#define TALITOS_CCCR_LO_NT 0x4 /* notification type */
#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */
#define TALITOS1_CCCR_LO_RESET 0x1 /* channel reset on SEC1 */
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 790f7cadc1ed..765f53e548ab 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1751,7 +1751,6 @@ static void __exit ux500_cryp_mod_fini(void)
{
pr_debug("[%s] is called!", __func__);
platform_driver_unregister(&cryp_driver);
- return;
}
module_init(ux500_cryp_mod_init);
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 5035b0dc1e40..abe8c15450df 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -319,7 +319,7 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
struct virtio_crypto *vcrypto =
virtcrypto_get_dev_node(node);
if (!vcrypto) {
- pr_err("virtio_crypto: Could not find a virtio device in the system");
+ pr_err("virtio_crypto: Could not find a virtio device in the system\n");
return -ENODEV;
}
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 17d84217dd76..fc60d00a2e84 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -27,21 +27,23 @@
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+
#include "aesp8-ppc.h"
struct p8_aes_ctr_ctx {
- struct crypto_blkcipher *fallback;
+ struct crypto_skcipher *fallback;
struct aes_key enc_key;
};
static int p8_aes_ctr_init(struct crypto_tfm *tfm)
{
const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_blkcipher *fallback;
+ struct crypto_skcipher *fallback;
struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
- fallback =
- crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ fallback = crypto_alloc_skcipher(alg, 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
printk(KERN_ERR
"Failed to allocate transformation for '%s': %ld\n",
@@ -49,11 +51,11 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
return PTR_ERR(fallback);
}
printk(KERN_INFO "Using '%s' as fallback implementation.\n",
- crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+ crypto_skcipher_driver_name(fallback));
- crypto_blkcipher_set_flags(
+ crypto_skcipher_set_flags(
fallback,
- crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm));
+ crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
ctx->fallback = fallback;
return 0;
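
A short sketch of what the allocation flags above ask for (the algorithm name is only an example): passing CRYPTO_ALG_ASYNC in the mask with 0 in the type requests a synchronous skcipher, and CRYPTO_ALG_NEED_FALLBACK in the mask excludes implementations that themselves need a fallback; a synchronous tfm is what makes the on-stack fallback request used later in this file safe.

	#include <crypto/skcipher.h>

	static struct crypto_skcipher *get_sync_fallback(const char *alg)
	{
		/* type = 0, mask = CRYPTO_ALG_ASYNC: only sync implementations match */
		return crypto_alloc_skcipher(alg, 0,
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK);
		/* returns ERR_PTR() on failure, as checked in p8_aes_ctr_init() */
	}
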
@@ -64,7 +66,7 @@ static void p8_aes_ctr_exit(struct crypto_tfm *tfm)
struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback) {
- crypto_free_blkcipher(ctx->fallback);
+ crypto_free_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
}
@@ -83,7 +85,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
+ ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
return ret;
}
@@ -117,15 +119,14 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
struct p8_aes_ctr_ctx *ctx =
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
- struct blkcipher_desc fallback_desc = {
- .tfm = ctx->fallback,
- .info = desc->info,
- .flags = desc->flags
- };
if (in_interrupt()) {
- ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
- nbytes);
+ SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+ skcipher_request_set_tfm(req, ctx->fallback);
+ skcipher_request_set_callback(req, desc->flags, NULL, NULL);
+ skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+ ret = crypto_skcipher_encrypt(req);
+ skcipher_request_zero(req);
} else {
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);