Diffstat (limited to 'drivers/crypto/nx')
-rw-r--r--  drivers/crypto/nx/nx-aes-cbc.c   81
-rw-r--r--  drivers/crypto/nx/nx-aes-ccm.c   45
-rw-r--r--  drivers/crypto/nx/nx-aes-ctr.c   87
-rw-r--r--  drivers/crypto/nx/nx-aes-ecb.c   76
-rw-r--r--  drivers/crypto/nx/nx-aes-gcm.c   29
-rw-r--r--  drivers/crypto/nx/nx.c           64
-rw-r--r--  drivers/crypto/nx/nx.h           19
-rw-r--r--  drivers/crypto/nx/nx_debugfs.c   18
8 files changed, 185 insertions, 234 deletions
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index e631f9979127..92e921eceed7 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -18,11 +18,11 @@
#include "nx.h"
-static int cbc_aes_nx_set_key(struct crypto_tfm *tfm,
- const u8 *in_key,
- unsigned int key_len)
+static int cbc_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
@@ -50,13 +50,11 @@ static int cbc_aes_nx_set_key(struct crypto_tfm *tfm,
return 0;
}
-static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes,
- int enc)
+static int cbc_aes_nx_crypt(struct skcipher_request *req,
+ int enc)
{
- struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
@@ -70,10 +68,11 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
do {
- to_process = nbytes - processed;
+ to_process = req->cryptlen - processed;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
- processed, csbcpb->cpb.aes_cbc.iv);
+ rc = nx_build_sg_lists(nx_ctx, req->iv, req->dst, req->src,
+ &to_process, processed,
+ csbcpb->cpb.aes_cbc.iv);
if (rc)
goto out;
@@ -83,56 +82,46 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
- memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
+ memcpy(req->iv, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
atomic_inc(&(nx_ctx->stats->aes_ops));
atomic64_add(csbcpb->csb.processed_byte_count,
&(nx_ctx->stats->aes_bytes));
processed += to_process;
- } while (processed < nbytes);
+ } while (processed < req->cryptlen);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int cbc_aes_nx_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_aes_nx_encrypt(struct skcipher_request *req)
{
- return cbc_aes_nx_crypt(desc, dst, src, nbytes, 1);
+ return cbc_aes_nx_crypt(req, 1);
}
-static int cbc_aes_nx_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_aes_nx_decrypt(struct skcipher_request *req)
{
- return cbc_aes_nx_crypt(desc, dst, src, nbytes, 0);
+ return cbc_aes_nx_crypt(req, 0);
}
-struct crypto_alg nx_cbc_aes_alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
- .cra_init = nx_crypto_ctx_aes_cbc_init,
- .cra_exit = nx_crypto_ctx_exit,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = cbc_aes_nx_set_key,
- .encrypt = cbc_aes_nx_encrypt,
- .decrypt = cbc_aes_nx_decrypt,
- }
+struct skcipher_alg nx_cbc_aes_alg = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-nx",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+ .init = nx_crypto_ctx_aes_cbc_init,
+ .exit = nx_crypto_ctx_skcipher_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = cbc_aes_nx_set_key,
+ .encrypt = cbc_aes_nx_encrypt,
+ .decrypt = cbc_aes_nx_decrypt,
};
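
For reference, this is the caller-side shape of the skcipher interface the
converted handlers now implement: dst, src, cryptlen and the IV all travel in
the request object, which is why the old desc/dst/src/nbytes parameter lists
collapse into a single struct skcipher_request. A minimal hypothetical sketch
(nx_cbc_demo and its parameters are illustrative, not part of this patch):

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int nx_cbc_demo(const u8 *key, unsigned int keylen,
		       struct scatterlist *src, struct scatterlist *dst,
		       unsigned int len, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	int rc;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_skcipher_setkey(tfm, key, keylen);
	if (rc)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		rc = -ENOMEM;
		goto out_free_tfm;
	}

	/* MAY_SLEEP here is what the driver reads back via req->base.flags */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, len, iv);
	rc = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return rc;
}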
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 5be8f01c5da8..4c9362eebefd 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -327,7 +327,7 @@ static int generate_pat(u8 *iv,
}
static int ccm_nx_decrypt(struct aead_request *req,
- struct blkcipher_desc *desc,
+ u8 *iv,
unsigned int assoclen)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
@@ -348,7 +348,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
req->src, nbytes + req->assoclen, authsize,
SCATTERWALK_FROM_SG);
- rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
+ rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
if (rc)
goto out;
@@ -367,7 +367,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
- rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+ rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
&to_process, processed + req->assoclen,
csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
@@ -381,7 +381,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
/* for partial completion, copy following for next
* entry into loop...
*/
- memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+ memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_s0,
@@ -405,7 +405,7 @@ out:
}
static int ccm_nx_encrypt(struct aead_request *req,
- struct blkcipher_desc *desc,
+ u8 *iv,
unsigned int assoclen)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
@@ -418,7 +418,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
+ rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
if (rc)
goto out;
@@ -436,7 +436,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
- rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+ rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
&to_process, processed + req->assoclen,
csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
@@ -450,7 +450,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
/* for partial completion, copy following for next
* entry into loop...
*/
- memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+ memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_s0,
@@ -481,67 +481,50 @@ static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
- struct blkcipher_desc desc;
u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
memcpy(iv + 4, req->iv, 8);
- desc.info = iv;
-
- return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
+ return ccm_nx_encrypt(req, iv, req->assoclen - 8);
}
static int ccm_aes_nx_encrypt(struct aead_request *req)
{
- struct blkcipher_desc desc;
int rc;
- desc.info = req->iv;
-
- rc = crypto_ccm_check_iv(desc.info);
+ rc = crypto_ccm_check_iv(req->iv);
if (rc)
return rc;
- return ccm_nx_encrypt(req, &desc, req->assoclen);
+ return ccm_nx_encrypt(req, req->iv, req->assoclen);
}
static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
- struct blkcipher_desc desc;
u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
memcpy(iv + 4, req->iv, 8);
- desc.info = iv;
-
- return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
+ return ccm_nx_decrypt(req, iv, req->assoclen - 8);
}
static int ccm_aes_nx_decrypt(struct aead_request *req)
{
- struct blkcipher_desc desc;
int rc;
- desc.info = req->iv;
-
- rc = crypto_ccm_check_iv(desc.info);
+ rc = crypto_ccm_check_iv(req->iv);
if (rc)
return rc;
- return ccm_nx_decrypt(req, &desc, req->assoclen);
+ return ccm_nx_decrypt(req, req->iv, req->assoclen);
}
-/* tell the block cipher walk routines that this is a stream cipher by
- * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
- * during encrypt/decrypt doesn't solve this problem, because it calls
- * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
- * but instead uses this tfm->blocksize. */
struct aead_alg nx_ccm_aes_alg = {
.base = {
.cra_name = "ccm(aes)",
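
The same pattern holds on the AEAD side: the IV arrives through the request,
and the driver now stages it in the per-request rctx->iv buffer rather than in
an on-stack blkcipher_desc. A hypothetical caller sketch (nx_ccm_demo is an
illustrative name; tfm is assumed to be an already-keyed "ccm(aes)" transform
with the authsize set):

#include <crypto/aead.h>
#include <linux/scatterlist.h>

static int nx_ccm_demo(struct crypto_aead *tfm, struct scatterlist *sg,
		       unsigned int assoclen, unsigned int cryptlen, u8 *iv)
{
	struct aead_request *req;
	int rc;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	aead_request_set_ad(req, assoclen);
	/* sg covers assoclen bytes of AAD followed by cryptlen bytes of data */
	aead_request_set_crypt(req, sg, sg, cryptlen, iv);
	rc = crypto_aead_encrypt(req);

	aead_request_free(req);
	return rc;
}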
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 191e226a11a1..6d5ce1a66f1e 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -19,11 +19,11 @@
#include "nx.h"
-static int ctr_aes_nx_set_key(struct crypto_tfm *tfm,
- const u8 *in_key,
- unsigned int key_len)
+static int ctr_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
@@ -51,11 +51,11 @@ static int ctr_aes_nx_set_key(struct crypto_tfm *tfm,
return 0;
}
-static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
- const u8 *in_key,
- unsigned int key_len)
+static int ctr3686_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
if (key_len < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
@@ -69,12 +69,10 @@ static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
return ctr_aes_nx_set_key(tfm, in_key, key_len);
}
-static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int ctr_aes_nx_crypt(struct skcipher_request *req, u8 *iv)
{
- struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
@@ -83,10 +81,11 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
do {
- to_process = nbytes - processed;
+ to_process = req->cryptlen - processed;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
- processed, csbcpb->cpb.aes_ctr.iv);
+ rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
+ &to_process, processed,
+ csbcpb->cpb.aes_ctr.iv);
if (rc)
goto out;
@@ -96,59 +95,51 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
- memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
+ memcpy(iv, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
atomic_inc(&(nx_ctx->stats->aes_ops));
atomic64_add(csbcpb->csb.processed_byte_count,
&(nx_ctx->stats->aes_bytes));
processed += to_process;
- } while (processed < nbytes);
+ } while (processed < req->cryptlen);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int ctr3686_aes_nx_crypt(struct skcipher_request *req)
{
- struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
u8 iv[16];
memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
- memcpy(iv + CTR_RFC3686_NONCE_SIZE,
- desc->info, CTR_RFC3686_IV_SIZE);
+ memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);
iv[12] = iv[13] = iv[14] = 0;
iv[15] = 1;
- desc->info = iv;
-
- return ctr_aes_nx_crypt(desc, dst, src, nbytes);
+ return ctr_aes_nx_crypt(req, iv);
}
-struct crypto_alg nx_ctr3686_aes_alg = {
- .cra_name = "rfc3686(ctr(aes))",
- .cra_driver_name = "rfc3686-ctr-aes-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = nx_crypto_ctx_aes_ctr_init,
- .cra_exit = nx_crypto_ctx_exit,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
- .ivsize = CTR_RFC3686_IV_SIZE,
- .setkey = ctr3686_aes_nx_set_key,
- .encrypt = ctr3686_aes_nx_crypt,
- .decrypt = ctr3686_aes_nx_crypt,
- }
+struct skcipher_alg nx_ctr3686_aes_alg = {
+ .base.cra_name = "rfc3686(ctr(aes))",
+ .base.cra_driver_name = "rfc3686-ctr-aes-nx",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = nx_crypto_ctx_aes_ctr_init,
+ .exit = nx_crypto_ctx_skcipher_exit,
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setkey = ctr3686_aes_nx_set_key,
+ .encrypt = ctr3686_aes_nx_crypt,
+ .decrypt = ctr3686_aes_nx_crypt,
+ .chunksize = AES_BLOCK_SIZE,
};
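
The ctr3686 wrapper above assembles the initial counter block exactly as RFC
3686 lays it out: the 4-byte nonce taken from the key, the 8-byte per-request
IV, then a 32-bit big-endian block counter starting at 1. The same
construction as a standalone sketch (the constant names mirror the kernel's
CTR_RFC3686_* values):

#include <linux/string.h>

#define RFC3686_NONCE_SIZE	4	/* CTR_RFC3686_NONCE_SIZE */
#define RFC3686_IV_SIZE		8	/* CTR_RFC3686_IV_SIZE */

static void rfc3686_counter_block(u8 ctrblk[16], const u8 *nonce,
				  const u8 *iv)
{
	memcpy(ctrblk, nonce, RFC3686_NONCE_SIZE);
	memcpy(ctrblk + RFC3686_NONCE_SIZE, iv, RFC3686_IV_SIZE);
	/* big-endian block counter, starting at 1 */
	ctrblk[12] = ctrblk[13] = ctrblk[14] = 0;
	ctrblk[15] = 1;
}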
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index c67570470c9d..77e338dc33f1 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -18,11 +18,11 @@
#include "nx.h"
-static int ecb_aes_nx_set_key(struct crypto_tfm *tfm,
- const u8 *in_key,
- unsigned int key_len)
+static int ecb_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
@@ -50,13 +50,11 @@ static int ecb_aes_nx_set_key(struct crypto_tfm *tfm,
return 0;
}
-static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes,
- int enc)
+static int ecb_aes_nx_crypt(struct skcipher_request *req,
+ int enc)
{
- struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
@@ -70,10 +68,10 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
do {
- to_process = nbytes - processed;
+ to_process = req->cryptlen - processed;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
- processed, NULL);
+ rc = nx_build_sg_lists(nx_ctx, NULL, req->dst, req->src,
+ &to_process, processed, NULL);
if (rc)
goto out;
@@ -83,7 +81,7 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
}
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
@@ -92,46 +90,36 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
&(nx_ctx->stats->aes_bytes));
processed += to_process;
- } while (processed < nbytes);
+ } while (processed < req->cryptlen);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int ecb_aes_nx_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_aes_nx_encrypt(struct skcipher_request *req)
{
- return ecb_aes_nx_crypt(desc, dst, src, nbytes, 1);
+ return ecb_aes_nx_crypt(req, 1);
}
-static int ecb_aes_nx_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_aes_nx_decrypt(struct skcipher_request *req)
{
- return ecb_aes_nx_crypt(desc, dst, src, nbytes, 0);
+ return ecb_aes_nx_crypt(req, 0);
}
-struct crypto_alg nx_ecb_aes_alg = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_alignmask = 0xf,
- .cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = nx_crypto_ctx_aes_ecb_init,
- .cra_exit = nx_crypto_ctx_exit,
- .cra_blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = ecb_aes_nx_set_key,
- .encrypt = ecb_aes_nx_encrypt,
- .decrypt = ecb_aes_nx_decrypt,
- }
+struct skcipher_alg nx_ecb_aes_alg = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-nx",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_alignmask = 0xf,
+ .base.cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = nx_crypto_ctx_aes_ecb_init,
+ .exit = nx_crypto_ctx_skcipher_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = ecb_aes_nx_set_key,
+ .encrypt = ecb_aes_nx_encrypt,
+ .decrypt = ecb_aes_nx_decrypt,
};
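
ECB is the degenerate case of the new calling convention: the algorithm has no
IV, so nx_ecb_aes_alg declares no .ivsize (it defaults to 0) and the handlers
pass NULL for both the iv source and the oiv destination. A caller would
simply hand a NULL IV to the request (hypothetical fragment, same assumptions
as the CBC sketch above):

	skcipher_request_set_crypt(req, src, dst, len, NULL);
	rc = crypto_skcipher_encrypt(req);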
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 7d3d67871270..19c6ed5baea4 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -166,8 +166,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
return rc;
}
-static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
- unsigned int assoclen)
+static int gmac(struct aead_request *req, const u8 *iv, unsigned int assoclen)
{
int rc;
struct nx_crypto_ctx *nx_ctx =
@@ -190,7 +189,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
/* Copy IV */
- memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, iv, AES_BLOCK_SIZE);
do {
/*
@@ -240,8 +239,7 @@ out:
return rc;
}
-static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
- int enc)
+static int gcm_empty(struct aead_request *req, const u8 *iv, int enc)
{
int rc;
struct nx_crypto_ctx *nx_ctx =
@@ -268,7 +266,7 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
len = AES_BLOCK_SIZE;
/* Encrypt the counter/IV */
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) iv,
&len, nx_ctx->ap->sglen);
if (len != AES_BLOCK_SIZE)
@@ -285,7 +283,7 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));
@@ -313,7 +311,6 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
crypto_aead_ctx(crypto_aead_reqtfm(req));
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
- struct blkcipher_desc desc;
unsigned int nbytes = req->cryptlen;
unsigned int processed = 0, to_process;
unsigned long irq_flags;
@@ -321,15 +318,14 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- desc.info = rctx->iv;
/* initialize the counter */
- *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
+ *(u32 *)&rctx->iv[NX_GCM_CTR_OFFSET] = 1;
if (nbytes == 0) {
if (assoclen == 0)
- rc = gcm_empty(req, &desc, enc);
+ rc = gcm_empty(req, rctx->iv, enc);
else
- rc = gmac(req, &desc, assoclen);
+ rc = gmac(req, rctx->iv, assoclen);
if (rc)
goto out;
else
@@ -358,7 +354,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
to_process = nbytes - processed;
csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
- rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
+ rc = nx_build_sg_lists(nx_ctx, rctx->iv, req->dst,
req->src, &to_process,
processed + req->assoclen,
csbcpb->cpb.aes_gcm.iv_or_cnt);
@@ -377,7 +373,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
if (rc)
goto out;
- memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
+ memcpy(rctx->iv, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_gcm.in_s0,
@@ -471,11 +467,6 @@ static int gcm4106_aes_nx_decrypt(struct aead_request *req)
return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
}
-/* tell the block cipher walk routines that this is a stream cipher by
- * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
- * during encrypt/decrypt doesn't solve this problem, because it calls
- * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
- * but instead uses this tfm->blocksize. */
struct aead_alg nx_gcm_aes_alg = {
.base = {
.cra_name = "gcm(aes)",
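
The counter setup that replaces desc.info above follows the GCM convention for
96-bit IVs: the initial counter block is IV || 0x00000001, which the driver
builds in place by storing 1 at NX_GCM_CTR_OFFSET within rctx->iv. The layout
as a standalone sketch (GCM_IV_SIZE and the byte offsets are the 96-bit IV
case; NX_GCM_CTR_OFFSET is assumed to address the final 32-bit word):

#include <linux/string.h>

#define GCM_IV_SIZE	12	/* 96-bit IV, the common GCM case */

static void gcm_initial_counter(u8 j0[16], const u8 *iv)
{
	memcpy(j0, iv, GCM_IV_SIZE);
	/* J0 = IV || 0x00000001 for a 96-bit IV */
	j0[12] = j0[13] = j0[14] = 0;
	j0[15] = 1;
}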
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 28817880c76d..f03c238f5a31 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -243,25 +243,25 @@ static long int trim_sg_list(struct nx_sg *sg,
* scatterlists based on them.
*
* @nx_ctx: NX crypto context for the lists we're building
- * @desc: the block cipher descriptor for the operation
+ * @iv: iv data, if the algorithm requires it
* @dst: destination scatterlist
* @src: source scatterlist
* @nbytes: length of data described in the scatterlists
* @offset: number of bytes to fast-forward past at the beginning of
* scatterlists.
- * @iv: destination for the iv data, if the algorithm requires it
+ * @oiv: destination for the iv data, if the algorithm requires it
*
- * This is common code shared by all the AES algorithms. It uses the block
- * cipher walk routines to traverse input and output scatterlists, building
+ * This is common code shared by all the AES algorithms. It uses the crypto
+ * scatterlist walk routines to traverse input and output scatterlists, building
* corresponding NX scatterlists
*/
int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
- struct blkcipher_desc *desc,
+ const u8 *iv,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int *nbytes,
unsigned int offset,
- u8 *iv)
+ u8 *oiv)
{
unsigned int delta = 0;
unsigned int total = *nbytes;
@@ -274,8 +274,8 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
max_sg_len = min_t(u64, max_sg_len,
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
- if (iv)
- memcpy(iv, desc->info, AES_BLOCK_SIZE);
+ if (oiv)
+ memcpy(oiv, iv, AES_BLOCK_SIZE);
*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
@@ -511,10 +511,10 @@ static bool nx_check_props(struct device *dev, u32 fc, u32 mode)
return true;
}
-static int nx_register_alg(struct crypto_alg *alg, u32 fc, u32 mode)
+static int nx_register_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
{
return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
- crypto_register_alg(alg) : 0;
+ crypto_register_skcipher(alg) : 0;
}
static int nx_register_aead(struct aead_alg *alg, u32 fc, u32 mode)
@@ -531,10 +531,10 @@ static int nx_register_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot)
crypto_register_shash(alg) : 0;
}
-static void nx_unregister_alg(struct crypto_alg *alg, u32 fc, u32 mode)
+static void nx_unregister_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
{
if (nx_check_props(NULL, fc, mode))
- crypto_unregister_alg(alg);
+ crypto_unregister_skcipher(alg);
}
static void nx_unregister_aead(struct aead_alg *alg, u32 fc, u32 mode)
@@ -573,15 +573,16 @@ static int nx_register_algs(void)
nx_driver.of.status = NX_OKAY;
- rc = nx_register_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
+ rc = nx_register_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
if (rc)
goto out;
- rc = nx_register_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
+ rc = nx_register_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
if (rc)
goto out_unreg_ecb;
- rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
+ rc = nx_register_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES,
+ NX_MODE_AES_CTR);
if (rc)
goto out_unreg_cbc;
@@ -633,11 +634,11 @@ out_unreg_gcm4106:
out_unreg_gcm:
nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_ctr3686:
- nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
+ nx_unregister_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
out_unreg_cbc:
- nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
+ nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
out_unreg_ecb:
- nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
+ nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
out:
return rc;
}
@@ -704,21 +705,21 @@ int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
NX_MODE_AES_GCM);
}
-int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
NX_MODE_AES_CTR);
}
-int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
NX_MODE_AES_CBC);
}
-int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
NX_MODE_AES_ECB);
}
@@ -752,6 +753,11 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
nx_ctx->out_sg = NULL;
}
+void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ nx_crypto_ctx_exit(crypto_skcipher_ctx(tfm));
+}
+
void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm)
{
struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
@@ -798,10 +804,12 @@ static int nx_remove(struct vio_dev *viodev)
NX_FC_AES, NX_MODE_AES_GCM);
nx_unregister_aead(&nx_gcm_aes_alg,
NX_FC_AES, NX_MODE_AES_GCM);
- nx_unregister_alg(&nx_ctr3686_aes_alg,
- NX_FC_AES, NX_MODE_AES_CTR);
- nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
- nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
+ nx_unregister_skcipher(&nx_ctr3686_aes_alg,
+ NX_FC_AES, NX_MODE_AES_CTR);
+ nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES,
+ NX_MODE_AES_CBC);
+ nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES,
+ NX_MODE_AES_ECB);
}
return 0;
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index 7ecca168f8c4..91c54289124a 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -145,19 +145,20 @@ struct crypto_aead;
int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm);
int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm);
int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
-int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm);
-int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm);
-int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm);
+int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm);
+int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm);
int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm);
void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
+void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm);
void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm);
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
u32 may_sleep);
struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
-int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
- struct scatterlist *, struct scatterlist *, unsigned int *,
- unsigned int, u8 *);
+int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, const u8 *iv,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int *nbytes, unsigned int offset, u8 *oiv);
struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
struct scatterlist *, unsigned int,
unsigned int *);
@@ -175,11 +176,11 @@ void nx_debugfs_fini(struct nx_crypto_driver *);
#define NX_PAGE_NUM(x) ((u64)(x) & 0xfffffffffffff000ULL)
-extern struct crypto_alg nx_cbc_aes_alg;
-extern struct crypto_alg nx_ecb_aes_alg;
+extern struct skcipher_alg nx_cbc_aes_alg;
+extern struct skcipher_alg nx_ecb_aes_alg;
extern struct aead_alg nx_gcm_aes_alg;
extern struct aead_alg nx_gcm4106_aes_alg;
-extern struct crypto_alg nx_ctr3686_aes_alg;
+extern struct skcipher_alg nx_ctr3686_aes_alg;
extern struct aead_alg nx_ccm_aes_alg;
extern struct aead_alg nx_ccm4309_aes_alg;
extern struct shash_alg nx_shash_aes_xcbc_alg;
diff --git a/drivers/crypto/nx/nx_debugfs.c b/drivers/crypto/nx/nx_debugfs.c
index e0d44a5512ab..1975bcbee997 100644
--- a/drivers/crypto/nx/nx_debugfs.c
+++ b/drivers/crypto/nx/nx_debugfs.c
@@ -38,23 +38,23 @@ void nx_debugfs_init(struct nx_crypto_driver *drv)
drv->dfs_root = root;
debugfs_create_u32("aes_ops", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.aes_ops);
+ root, &drv->stats.aes_ops.counter);
debugfs_create_u32("sha256_ops", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.sha256_ops);
+ root, &drv->stats.sha256_ops.counter);
debugfs_create_u32("sha512_ops", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.sha512_ops);
+ root, &drv->stats.sha512_ops.counter);
debugfs_create_u64("aes_bytes", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u64 *)&drv->stats.aes_bytes);
+ root, &drv->stats.aes_bytes.counter);
debugfs_create_u64("sha256_bytes", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u64 *)&drv->stats.sha256_bytes);
+ root, &drv->stats.sha256_bytes.counter);
debugfs_create_u64("sha512_bytes", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u64 *)&drv->stats.sha512_bytes);
+ root, &drv->stats.sha512_bytes.counter);
debugfs_create_u32("errors", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.errors);
+ root, &drv->stats.errors.counter);
debugfs_create_u32("last_error", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.last_error);
+ root, &drv->stats.last_error.counter);
debugfs_create_u32("last_error_pid", S_IRUSR | S_IRGRP | S_IROTH,
- root, (u32 *)&drv->stats.last_error_pid);
+ root, &drv->stats.last_error_pid.counter);
}
void
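
A note on the debugfs change above: it stops casting atomic counters to plain
integer pointers and instead points debugfs at the integer member inside the
atomic type. This relies on the kernel's atomic types wrapping a single
'counter' field, roughly as follows (shape only, not copied verbatim from
the kernel headers):

typedef struct {
	int counter;
} atomic_t;

typedef struct {
	s64 counter;
} atomic64_t;

/* hence &drv->stats.aes_ops.counter is directly usable where
 * debugfs_create_u32() expects a pointer to a 32-bit value */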