author    Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 09:52:51 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 09:52:51 -0700
commit    3e7a716a92a0e051f5502c7b689f8c9127c37c33
tree      2ebb892eb3a024f108e68a9577c767a53b955a4a /drivers/crypto/caam
parent    Merge tag 'edac_for_3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp
parent    crypto: drbg - fix failure of generating multiple of 2**16 bytes
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:

 - CTR(AES) optimisation on x86_64 using "by8" AVX.
 - arm64 support to ccp
 - Intel QAT crypto driver
 - Qualcomm crypto engine driver
 - x86-64 assembly optimisation for 3DES
 - CTR(3DES) speed test
 - move FIPS panic from module.c so that it only triggers on crypto
   modules
 - SP800-90A Deterministic Random Bit Generator (drbg).
 - more test vectors for ghash.
 - tweak self tests to catch partial block bugs.
 - misc fixes.

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (94 commits)
  crypto: drbg - fix failure of generating multiple of 2**16 bytes
  crypto: ccp - Do not sign extend input data to CCP
  crypto: testmgr - add missing spaces to drbg error strings
  crypto: atmel-tdes - Switch to managed version of kzalloc
  crypto: atmel-sha - Switch to managed version of kzalloc
  crypto: testmgr - use chunks smaller than algo block size in chunk tests
  crypto: qat - Fixed SKU1 dev issue
  crypto: qat - Use hweight for bit counting
  crypto: qat - Updated print outputs
  crypto: qat - change ae_num to ae_id
  crypto: qat - change slice->regions to slice->region
  crypto: qat - use min_t macro
  crypto: qat - remove unnecessary parentheses
  crypto: qat - remove unneeded header
  crypto: qat - checkpatch blank lines
  crypto: qat - remove unnecessary return codes
  crypto: Resolve shadow warnings
  crypto: ccp - Remove "select OF" from Kconfig
  crypto: caam - fix DECO RSR polling
  crypto: qce - Let 'DEV_QCE' depend on both HAS_DMA and HAS_IOMEM
  ...
Diffstat (limited to 'drivers/crypto/caam')
-rw-r--r--  drivers/crypto/caam/caamalg.c    80
-rw-r--r--  drivers/crypto/caam/caamhash.c  186
-rw-r--r--  drivers/crypto/caam/caamrng.c    79
-rw-r--r--  drivers/crypto/caam/ctrl.c       76
-rw-r--r--  drivers/crypto/caam/desc.h        1
-rw-r--r--  drivers/crypto/caam/intern.h      1
-rw-r--r--  drivers/crypto/caam/jr.c          6
-rw-r--r--  drivers/crypto/caam/regs.h      105
8 files changed, 425 insertions, 109 deletions
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index c09ce1f040d3..a80ea853701d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -97,6 +97,13 @@ static inline void append_dec_op1(u32 *desc, u32 type)
{
u32 *jump_cmd, *uncond_jump_cmd;
+ /* DK bit is valid only for AES */
+ if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ return;
+ }
+
jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
append_operation(desc, type | OP_ALG_AS_INITFINAL |
OP_ALG_DECRYPT);
@@ -786,7 +793,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
@@ -1313,8 +1320,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
DMA_FROM_DEVICE, dst_chained);
}
- /* Check if data are contiguous */
iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Check if data are contiguous */
if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
iv_dma || src_nents || iv_dma + ivsize !=
sg_dma_address(req->src)) {
@@ -1345,8 +1357,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
desc_bytes;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
*all_contig_ptr = all_contig;
sec4_sg_index = 0;
@@ -1369,6 +1379,12 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sg_to_sec4_sg_last(req->dst, dst_nents,
edesc->sec4_sg + sec4_sg_index, 0);
}
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return ERR_PTR(-ENOMEM);
+ }
return edesc;
}
@@ -1494,8 +1510,13 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
DMA_FROM_DEVICE, dst_chained);
}
- /* Check if data are contiguous */
iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Check if data are contiguous */
if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
contig &= ~GIV_SRC_CONTIG;
@@ -1534,8 +1555,6 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
desc_bytes;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
*contig_ptr = contig;
sec4_sg_index = 0;
@@ -1559,6 +1578,12 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
sg_to_sec4_sg_last(req->dst, dst_nents,
edesc->sec4_sg + sec4_sg_index, 0);
}
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return ERR_PTR(-ENOMEM);
+ }
return edesc;
}
@@ -1650,11 +1675,16 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
DMA_FROM_DEVICE, dst_chained);
}
+ iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
/*
* Check if iv can be contiguous with source and destination.
* If so, include it. If not, create scatterlist.
*/
- iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
iv_contig = true;
else
@@ -1693,6 +1723,11 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
edesc->iv_dma = iv_dma;
#ifdef DEBUG
@@ -2441,8 +2476,37 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
static int __init caam_algapi_init(void)
{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ struct device *ctrldev;
+ void *priv;
int i = 0, err = 0;
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev) {
+ of_node_put(dev_node);
+ return -ENODEV;
+ }
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+ of_node_put(dev_node);
+
+ /*
+ * If priv is NULL, it's probably because the caam driver wasn't
+ * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+ */
+ if (!priv)
+ return -ENODEV;
+
+
INIT_LIST_HEAD(&alg_list);
/* register crypto algorithms the device supports */
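
The dominant fix in the caamalg.c hunks above is the same everywhere: dma_map_single() can return an invalid handle, and the only portable check is dma_mapping_error() before the handle is used. Note also that the sec4_sg_dma mapping now happens only after the scatterlist has been fully populated, so the device never sees a half-built table. A minimal sketch of the check idiom, assuming kernel headers; example_map_buf() is an illustrative name, not a driver function:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Map a buffer for device reads and fail cleanly if the DMA
     * mapping is invalid -- the check added throughout caamalg.c. */
    static int example_map_buf(struct device *dev, void *buf, size_t len,
                               dma_addr_t *handle)
    {
            *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, *handle)) {
                    dev_err(dev, "unable to map buffer\n");
                    return -ENOMEM;
            }
            return 0;
    }
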
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 0d9284ef96a8..b464d03ebf40 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -137,13 +137,20 @@ struct caam_hash_state {
/* Common job descriptor seq in/out ptr routines */
/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
-static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
- struct caam_hash_state *state,
- int ctx_len)
+static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
+ struct caam_hash_state *state,
+ int ctx_len)
{
state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
ctx_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, state->ctx_dma)) {
+ dev_err(jrdev, "unable to map ctx\n");
+ return -ENOMEM;
+ }
+
append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
+
+ return 0;
}
/* Map req->result, and append seq_out_ptr command that points to it */
@@ -201,14 +208,19 @@ try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
}
/* Map state->caam_ctx, and add it to link table */
-static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
- struct caam_hash_state *state,
- int ctx_len,
- struct sec4_sg_entry *sec4_sg,
- u32 flag)
+static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
+ struct caam_hash_state *state, int ctx_len,
+ struct sec4_sg_entry *sec4_sg, u32 flag)
{
state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
+ if (dma_mapping_error(jrdev, state->ctx_dma)) {
+ dev_err(jrdev, "unable to map ctx\n");
+ return -ENOMEM;
+ }
+
dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
+
+ return 0;
}
/* Common shared descriptor commands */
@@ -487,11 +499,11 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
digestsize, 1);
#endif
}
- *keylen = digestsize;
-
dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
+ *keylen = digestsize;
+
kfree(desc);
return ret;
@@ -706,7 +718,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
if (err)
caam_jr_strstatus(jrdev, err);
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
kfree(edesc);
#ifdef DEBUG
@@ -741,7 +753,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
if (err)
caam_jr_strstatus(jrdev, err);
- ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
kfree(edesc);
#ifdef DEBUG
@@ -808,12 +820,11 @@ static int ahash_update_ctx(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes,
- DMA_TO_DEVICE);
- ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
- edesc->sec4_sg, DMA_BIDIRECTIONAL);
+ ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+ edesc->sec4_sg, DMA_BIDIRECTIONAL);
+ if (ret)
+ return ret;
state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
edesc->sec4_sg + 1,
@@ -839,6 +850,14 @@ static int ahash_update_ctx(struct ahash_request *req)
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
HDR_REVERSE);
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
+
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
to_hash, LDST_SGF);
@@ -911,23 +930,34 @@ static int ahash_final_ctx(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
edesc->src_nents = 0;
- ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
- DMA_TO_DEVICE);
+ ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+ edesc->sec4_sg, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
last_buflen);
(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
+
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
LDST_SGF);
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
+ if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+ dev_err(jrdev, "unable to map dst\n");
+ return -ENOMEM;
+ }
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -989,11 +1019,11 @@ static int ahash_finup_ctx(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
- DMA_TO_DEVICE);
+ ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+ edesc->sec4_sg, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
@@ -1002,11 +1032,22 @@ static int ahash_finup_ctx(struct ahash_request *req)
src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
sec4_sg_src_index, chained);
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
+
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
buflen + req->nbytes, LDST_SGF);
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
+ if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+ dev_err(jrdev, "unable to map dst\n");
+ return -ENOMEM;
+ }
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1056,8 +1097,7 @@ static int ahash_digest(struct ahash_request *req)
}
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->src_nents = src_nents;
edesc->chained = chained;
@@ -1067,6 +1107,12 @@ static int ahash_digest(struct ahash_request *req)
if (src_nents) {
sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
src_dma = edesc->sec4_sg_dma;
options = LDST_SGF;
} else {
@@ -1077,6 +1123,10 @@ static int ahash_digest(struct ahash_request *req)
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
+ if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+ dev_err(jrdev, "unable to map dst\n");
+ return -ENOMEM;
+ }
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1125,11 +1175,19 @@ static int ahash_final_no_ctx(struct ahash_request *req)
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, state->buf_dma)) {
+ dev_err(jrdev, "unable to map src\n");
+ return -ENOMEM;
+ }
append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
+ if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+ dev_err(jrdev, "unable to map dst\n");
+ return -ENOMEM;
+ }
edesc->src_nents = 0;
#ifdef DEBUG
@@ -1197,9 +1255,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes,
- DMA_TO_DEVICE);
+ edesc->dst_dma = 0;
state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
buf, *buflen);
@@ -1216,9 +1272,19 @@ static int ahash_update_no_ctx(struct ahash_request *req)
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
HDR_REVERSE);
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
+
append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
- map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+ ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+ if (ret)
+ return ret;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1297,8 +1363,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
state->buf_dma, buflen,
@@ -1307,11 +1371,22 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
chained);
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
+
append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
req->nbytes, LDST_SGF);
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
+ if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+ dev_err(jrdev, "unable to map dst\n");
+ return -ENOMEM;
+ }
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1380,13 +1455,19 @@ static int ahash_update_first(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes,
- DMA_TO_DEVICE);
+ edesc->dst_dma = 0;
if (src_nents) {
sg_to_sec4_sg_last(req->src, src_nents,
edesc->sec4_sg, 0);
+ edesc->sec4_sg_dma = dma_map_single(jrdev,
+ edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
src_dma = edesc->sec4_sg_dma;
options = LDST_SGF;
} else {
@@ -1404,7 +1485,9 @@ static int ahash_update_first(struct ahash_request *req)
append_seq_in_ptr(desc, src_dma, to_hash, options);
- map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+ ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+ if (ret)
+ return ret;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1453,6 +1536,7 @@ static int ahash_init(struct ahash_request *req)
state->final = ahash_final_no_ctx;
state->current_buf = 0;
+ state->buf_dma = 0;
return 0;
}
@@ -1787,8 +1871,36 @@ caam_hash_alloc(struct caam_hash_template *template,
static int __init caam_algapi_hash_init(void)
{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ struct device *ctrldev;
+ void *priv;
int i = 0, err = 0;
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev) {
+ of_node_put(dev_node);
+ return -ENODEV;
+ }
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+ of_node_put(dev_node);
+
+ /*
+ * If priv is NULL, it's probably because the caam driver wasn't
+ * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+ */
+ if (!priv)
+ return -ENODEV;
+
INIT_LIST_HEAD(&hash_list);
/* register crypto algorithms the device supports */
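
caamhash.c makes the mapping helpers fallible: map_seq_out_ptr_ctx() and ctx_map_to_sec4_sg() go from void to int so that a failed context mapping propagates to every caller instead of being silently ignored. A hedged before/after sketch of that conversion, using a simplified, hypothetical state struct rather than the driver's caam_hash_state:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    struct example_state {
            void *ctx;
            size_t len;
            dma_addr_t ctx_dma;
    };

    /* Before: the caller could not learn that the mapping failed. */
    static void map_ctx_old(struct device *dev, struct example_state *st)
    {
            st->ctx_dma = dma_map_single(dev, st->ctx, st->len,
                                         DMA_FROM_DEVICE);
    }

    /* After: report -ENOMEM so every call site can bail out early. */
    static int map_ctx_new(struct device *dev, struct example_state *st)
    {
            st->ctx_dma = dma_map_single(dev, st->ctx, st->len,
                                         DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, st->ctx_dma)) {
                    dev_err(dev, "unable to map ctx\n");
                    return -ENOMEM;
            }
            return 0;
    }
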
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 8c07d3153f12..ae31e555793c 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -185,7 +185,7 @@ static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
max - copied_idx, false);
}
-static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
+static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
{
struct device *jrdev = ctx->jrdev;
u32 *desc = ctx->sh_desc;
@@ -203,13 +203,18 @@ static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
#ifdef DEBUG
print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
desc, desc_bytes(desc), 1);
#endif
+ return 0;
}
-static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
+static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
{
struct device *jrdev = ctx->jrdev;
struct buf_data *bd = &ctx->bufs[buf_id];
@@ -220,12 +225,17 @@ static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
HDR_REVERSE);
bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, bd->addr)) {
+ dev_err(jrdev, "unable to map dst\n");
+ return -ENOMEM;
+ }
append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
desc, desc_bytes(desc), 1);
#endif
+ return 0;
}
static void caam_cleanup(struct hwrng *rng)
@@ -242,24 +252,44 @@ static void caam_cleanup(struct hwrng *rng)
rng_unmap_ctx(rng_ctx);
}
-static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
+static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
{
struct buf_data *bd = &ctx->bufs[buf_id];
+ int err;
+
+ err = rng_create_job_desc(ctx, buf_id);
+ if (err)
+ return err;
- rng_create_job_desc(ctx, buf_id);
atomic_set(&bd->empty, BUF_EMPTY);
submit_job(ctx, buf_id == ctx->current_buf);
wait_for_completion(&bd->filled);
+
+ return 0;
}
-static void caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
+static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
{
+ int err;
+
ctx->jrdev = jrdev;
- rng_create_sh_desc(ctx);
+
+ err = rng_create_sh_desc(ctx);
+ if (err)
+ return err;
+
ctx->current_buf = 0;
ctx->cur_buf_idx = 0;
- caam_init_buf(ctx, 0);
- caam_init_buf(ctx, 1);
+
+ err = caam_init_buf(ctx, 0);
+ if (err)
+ return err;
+
+ err = caam_init_buf(ctx, 1);
+ if (err)
+ return err;
+
+ return 0;
}
static struct hwrng caam_rng = {
@@ -278,6 +308,35 @@ static void __exit caam_rng_exit(void)
static int __init caam_rng_init(void)
{
struct device *dev;
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ struct device *ctrldev;
+ void *priv;
+ int err;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev) {
+ of_node_put(dev_node);
+ return -ENODEV;
+ }
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+ of_node_put(dev_node);
+
+ /*
+ * If priv is NULL, it's probably because the caam driver wasn't
+ * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+ */
+ if (!priv)
+ return -ENODEV;
dev = caam_jr_alloc();
if (IS_ERR(dev)) {
@@ -287,7 +346,9 @@ static int __init caam_rng_init(void)
rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
if (!rng_ctx)
return -ENOMEM;
- caam_init_rng(rng_ctx, dev);
+ err = caam_init_rng(rng_ctx, dev);
+ if (err)
+ return err;
dev_info(dev, "registering rng-caam\n");
return hwrng_register(&caam_rng);
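
caamalg, caamhash and caamrng all gain the same init-time guard: locate the SEC4 controller node, resolve its platform device, and refuse to load if the controller driver never set its driver data (for example because RNG4 instantiation failed). The shared logic, condensed into one sketch; caam_ctrl_ready() is an illustrative name, the modules open-code this inline:

    #include <linux/device.h>
    #include <linux/of.h>
    #include <linux/of_platform.h>

    static int caam_ctrl_ready(void)
    {
            struct device_node *np;
            struct platform_device *pdev;
            void *priv;

            np = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
            if (!np) {
                    np = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
                    if (!np)
                            return -ENODEV;
            }

            pdev = of_find_device_by_node(np);
            of_node_put(np);
            if (!pdev)
                    return -ENODEV;

            /* NULL drvdata means the controller probe never completed. */
            priv = dev_get_drvdata(&pdev->dev);
            return priv ? 0 : -ENODEV;
    }
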
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 1c38f86bf63a..3cade79ea41e 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -5,6 +5,7 @@
* Copyright 2008-2012 Freescale Semiconductor, Inc.
*/
+#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -87,6 +88,17 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
/* Set the bit to request direct access to DECO0 */
topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+
+ if (ctrlpriv->virt_en == 1) {
+ setbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+
+ while (!(rd_reg32(&topregs->ctrl.deco_rsr) & DECORSR_VALID) &&
+ --timeout)
+ cpu_relax();
+
+ timeout = 100000;
+ }
+
setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) &&
@@ -129,6 +141,9 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
*status = rd_reg32(&topregs->deco.op_status_hi) &
DECO_OP_STATUS_HI_ERR_MASK;
+ if (ctrlpriv->virt_en == 1)
+ clrbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+
/* Mark the DECO as free */
clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
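
The DECORSR handling above is a bounded poll: request the job-ring source, then spin on the VALID bit with a decrementing counter so a non-responsive DECO cannot hang the probe (the driver then resets timeout before the next poll). The bare idiom, sketched with the generic ioread32() in place of the driver's endian-aware rd_reg32():

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <asm/processor.h>      /* cpu_relax() */

    /* Spin on a 32-bit status flag, giving up after 'timeout'
     * iterations instead of hanging if the hardware never responds. */
    static int poll_flag(void __iomem *reg, u32 flag, unsigned int timeout)
    {
            while (!(ioread32(reg) & flag) && --timeout)
                    cpu_relax();
            return timeout ? 0 : -ETIMEDOUT;
    }
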
@@ -295,9 +310,6 @@ static int caam_remove(struct platform_device *pdev)
/* Unmap controller region */
iounmap(&topregs->ctrl);
- kfree(ctrlpriv->jrpdev);
- kfree(ctrlpriv);
-
return ret;
}
@@ -380,9 +392,11 @@ static int caam_probe(struct platform_device *pdev)
#ifdef CONFIG_DEBUG_FS
struct caam_perfmon *perfmon;
#endif
- u64 cha_vid;
+ u32 scfgr, comp_params;
+ u32 cha_vid_ls;
- ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
+ ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
+ GFP_KERNEL);
if (!ctrlpriv)
return -ENOMEM;
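
Moving ctrlpriv (and, below, jrpdev) to devm_kzalloc() ties each allocation to the controller device's lifetime, which is exactly why caam_remove() above could drop its kfree() calls. A generic sketch of the managed-allocation pattern, with hypothetical names:

    #include <linux/device.h>
    #include <linux/slab.h>

    struct example_priv {
            int placeholder;        /* stand-in for real driver state */
    };

    static int example_probe(struct device *dev)
    {
            /* Freed automatically when the device unbinds, so neither
             * the remove path nor later probe errors need kfree(). */
            struct example_priv *priv = devm_kzalloc(dev, sizeof(*priv),
                                                     GFP_KERNEL);

            if (!priv)
                    return -ENOMEM;
            dev_set_drvdata(dev, priv);
            return 0;
    }
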
@@ -413,13 +427,40 @@ static int caam_probe(struct platform_device *pdev)
setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
(sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
+ /*
+ * Read the Compile Time parameters and SCFGR to determine
+ * if Virtualization is enabled for this platform
+ */
+ comp_params = rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms);
+ scfgr = rd_reg32(&topregs->ctrl.scfgr);
+
+ ctrlpriv->virt_en = 0;
+ if (comp_params & CTPR_MS_VIRT_EN_INCL) {
+ /* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
+ * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
+ */
+ if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
+ (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
+ (scfgr & SCFGR_VIRT_EN)))
+ ctrlpriv->virt_en = 1;
+ } else {
+ /* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
+ if (comp_params & CTPR_MS_VIRT_EN_POR)
+ ctrlpriv->virt_en = 1;
+ }
+
+ if (ctrlpriv->virt_en == 1)
+ setbits32(&topregs->ctrl.jrstart, JRSTART_JR0_START |
+ JRSTART_JR1_START | JRSTART_JR2_START |
+ JRSTART_JR3_START);
+
if (sizeof(dma_addr_t) == sizeof(u64))
if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
- dma_set_mask(dev, DMA_BIT_MASK(40));
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
else
- dma_set_mask(dev, DMA_BIT_MASK(36));
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
else
- dma_set_mask(dev, DMA_BIT_MASK(32));
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
/*
* Detect and enable JobRs
@@ -432,8 +473,9 @@ static int caam_probe(struct platform_device *pdev)
of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
rspec++;
- ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
- GFP_KERNEL);
+ ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev,
+ sizeof(struct platform_device *) * rspec,
+ GFP_KERNEL);
if (ctrlpriv->jrpdev == NULL) {
iounmap(&topregs->ctrl);
return -ENOMEM;
@@ -456,8 +498,9 @@ static int caam_probe(struct platform_device *pdev)
}
/* Check to see if QI present. If so, enable */
- ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
- CTPR_QI_MASK);
+ ctrlpriv->qi_present =
+ !!(rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms) &
+ CTPR_MS_QI_MASK);
if (ctrlpriv->qi_present) {
ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
/* This is all that's required to physically enable QI */
@@ -471,13 +514,13 @@ static int caam_probe(struct platform_device *pdev)
return -ENOMEM;
}
- cha_vid = rd_reg64(&topregs->ctrl.perfmon.cha_id);
+ cha_vid_ls = rd_reg32(&topregs->ctrl.perfmon.cha_id_ls);
/*
* If SEC has RNG version >= 4 and RNG state handle has not been
* already instantiated, do RNG instantiation
*/
- if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) {
+ if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
ctrlpriv->rng4_sh_init =
rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
/*
@@ -531,7 +574,8 @@ static int caam_probe(struct platform_device *pdev)
/* NOTE: RTIC detection ought to go here, around Si time */
- caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id);
+ caam_id = (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ms) << 32 |
+ (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ls);
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
@@ -547,7 +591,7 @@ static int caam_probe(struct platform_device *pdev)
*/
perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
- ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
+ ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
/* Controller-level - performance monitor counters */
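
The virtualization probe encodes a small truth table: when the compile-time parameters say the VIRT_EN logic is included, virtualization is on if it was enabled at power-on reset or, failing that, through SCFGR; when the logic is absent, only the POR bit decides. Condensed into one hedged helper (bit values copied from the regs.h hunk below; the function name is illustrative):

    #include <linux/types.h>

    #define CTPR_MS_VIRT_EN_INCL 0x00000001
    #define CTPR_MS_VIRT_EN_POR  0x00000002
    #define SCFGR_VIRT_EN        0x00008000

    /* Returns 1 when CAAM virtualization is enabled, following the
     * CTPR/SCFGR rules in caam_probe(); POR || SCFGR collapses the
     * two INCL=1 cases spelled out in the diff's comment. */
    static int caam_virt_enabled(u32 comp_params, u32 scfgr)
    {
            if (comp_params & CTPR_MS_VIRT_EN_INCL)
                    return (comp_params & CTPR_MS_VIRT_EN_POR) ||
                           (scfgr & SCFGR_VIRT_EN);
            return !!(comp_params & CTPR_MS_VIRT_EN_POR);
    }

The same probe also stops using rd_reg64() on perfmon registers, composing 64-bit values such as caam_id from two explicit 32-bit reads shifted together, in line with the split u32 fields added to struct caam_perfmon below.
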
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 7e4500f18df6..d397ff9d56fd 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -321,7 +321,6 @@ struct sec4_sg_entry {
/* Continue - Not the last FIFO store to come */
#define FIFOST_CONT_SHIFT 23
#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
-#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
/*
* Extended Length - use 32-bit extended length that
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 6d85fcc5bd0a..97363db4e56e 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -82,6 +82,7 @@ struct caam_drv_private {
u8 total_jobrs; /* Total Job Rings in device */
u8 qi_present; /* Nonzero if QI present in device */
int secvio_irq; /* Security violation interrupt number */
+ int virt_en; /* Virtualization enabled in CAAM */
#define RNG4_MAX_HANDLES 2
/* RNG4 block */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index b512a4ba7569..4d18e27ffa9e 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -476,11 +476,11 @@ static int caam_jr_probe(struct platform_device *pdev)
if (sizeof(dma_addr_t) == sizeof(u64))
if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
- dma_set_mask(jrdev, DMA_BIT_MASK(40));
+ dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
else
- dma_set_mask(jrdev, DMA_BIT_MASK(36));
+ dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
else
- dma_set_mask(jrdev, DMA_BIT_MASK(32));
+ dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
/* Identify the interrupt */
jrpriv->irq = irq_of_parse_and_map(nprop, 0);
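
dma_set_mask() constrains only streaming mappings; dma_set_mask_and_coherent() applies the same limit to coherent allocations as well, which is why both the controller and job-ring probes switch to it. A sketch of the idiom; the 36-bit width mirrors the SEC v4 branch and is illustrative:

    #include <linux/dma-mapping.h>

    /* Constrain both streaming and coherent DMA to 36 bits in one
     * call, replacing the streaming-only dma_set_mask(). */
    static int example_set_dma_mask(struct device *dev)
    {
            return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
    }
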
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index cbde8b95a6f8..f48e344ffc39 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -84,6 +84,7 @@
#endif
#ifndef CONFIG_64BIT
+#ifdef __BIG_ENDIAN
static inline void wr_reg64(u64 __iomem *reg, u64 data)
{
wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32);
@@ -95,6 +96,21 @@ static inline u64 rd_reg64(u64 __iomem *reg)
return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) |
((u64)rd_reg32((u32 __iomem *)reg + 1));
}
+#else
+#ifdef __LITTLE_ENDIAN
+static inline void wr_reg64(u64 __iomem *reg, u64 data)
+{
+ wr_reg32((u32 __iomem *)reg + 1, (data & 0xffffffff00000000ull) >> 32);
+ wr_reg32((u32 __iomem *)reg, data & 0x00000000ffffffffull);
+}
+
+static inline u64 rd_reg64(u64 __iomem *reg)
+{
+ return (((u64)rd_reg32((u32 __iomem *)reg + 1)) << 32) |
+ ((u64)rd_reg32((u32 __iomem *)reg));
+}
+#endif
+#endif
#endif
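
Context for the regs.h change above: on a 32-bit build a 64-bit CAAM register must be accessed as two 32-bit words, and which word sits at the lower offset depends on endianness, hence the new little-endian variants that swap the word order. A standalone userspace illustration of the layout difference (plain memory, not MMIO, purely for demonstration):

    #include <stdint.h>
    #include <stdio.h>

    /* Store a 64-bit value as two 32-bit words, most significant word
     * first (big-endian layout) or last (little-endian), mirroring the
     * two wr_reg64() variants in regs.h. */
    static void wr64(uint32_t *reg, uint64_t data, int big_endian)
    {
            uint32_t hi = (uint32_t)(data >> 32), lo = (uint32_t)data;

            reg[big_endian ? 0 : 1] = hi;
            reg[big_endian ? 1 : 0] = lo;
    }

    int main(void)
    {
            uint32_t r[2];

            wr64(r, 0x1122334455667788ull, 1);
            printf("BE: %08x %08x\n", r[0], r[1]); /* 11223344 55667788 */
            wr64(r, 0x1122334455667788ull, 0);
            printf("LE: %08x %08x\n", r[0], r[1]); /* 55667788 11223344 */
            return 0;
    }
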
/*
@@ -114,45 +130,45 @@ struct jr_outentry {
*/
/* Number of DECOs */
-#define CHA_NUM_DECONUM_SHIFT 56
-#define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT)
+#define CHA_NUM_MS_DECONUM_SHIFT 24
+#define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
/* CHA Version IDs */
-#define CHA_ID_AES_SHIFT 0
-#define CHA_ID_AES_MASK (0xfull << CHA_ID_AES_SHIFT)
+#define CHA_ID_LS_AES_SHIFT 0
+#define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
-#define CHA_ID_DES_SHIFT 4
-#define CHA_ID_DES_MASK (0xfull << CHA_ID_DES_SHIFT)
+#define CHA_ID_LS_DES_SHIFT 4
+#define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
-#define CHA_ID_ARC4_SHIFT 8
-#define CHA_ID_ARC4_MASK (0xfull << CHA_ID_ARC4_SHIFT)
+#define CHA_ID_LS_ARC4_SHIFT 8
+#define CHA_ID_LS_ARC4_MASK (0xfull << CHA_ID_LS_ARC4_SHIFT)
-#define CHA_ID_MD_SHIFT 12
-#define CHA_ID_MD_MASK (0xfull << CHA_ID_MD_SHIFT)
+#define CHA_ID_LS_MD_SHIFT 12
+#define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT)
-#define CHA_ID_RNG_SHIFT 16
-#define CHA_ID_RNG_MASK (0xfull << CHA_ID_RNG_SHIFT)
+#define CHA_ID_LS_RNG_SHIFT 16
+#define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT)
-#define CHA_ID_SNW8_SHIFT 20
-#define CHA_ID_SNW8_MASK (0xfull << CHA_ID_SNW8_SHIFT)
+#define CHA_ID_LS_SNW8_SHIFT 20
+#define CHA_ID_LS_SNW8_MASK (0xfull << CHA_ID_LS_SNW8_SHIFT)
-#define CHA_ID_KAS_SHIFT 24
-#define CHA_ID_KAS_MASK (0xfull << CHA_ID_KAS_SHIFT)
+#define CHA_ID_LS_KAS_SHIFT 24
+#define CHA_ID_LS_KAS_MASK (0xfull << CHA_ID_LS_KAS_SHIFT)
-#define CHA_ID_PK_SHIFT 28
-#define CHA_ID_PK_MASK (0xfull << CHA_ID_PK_SHIFT)
+#define CHA_ID_LS_PK_SHIFT 28
+#define CHA_ID_LS_PK_MASK (0xfull << CHA_ID_LS_PK_SHIFT)
-#define CHA_ID_CRC_SHIFT 32
-#define CHA_ID_CRC_MASK (0xfull << CHA_ID_CRC_SHIFT)
+#define CHA_ID_MS_CRC_SHIFT 0
+#define CHA_ID_MS_CRC_MASK (0xfull << CHA_ID_MS_CRC_SHIFT)
-#define CHA_ID_SNW9_SHIFT 36
-#define CHA_ID_SNW9_MASK (0xfull << CHA_ID_SNW9_SHIFT)
+#define CHA_ID_MS_SNW9_SHIFT 4
+#define CHA_ID_MS_SNW9_MASK (0xfull << CHA_ID_MS_SNW9_SHIFT)
-#define CHA_ID_DECO_SHIFT 56
-#define CHA_ID_DECO_MASK (0xfull << CHA_ID_DECO_SHIFT)
+#define CHA_ID_MS_DECO_SHIFT 24
+#define CHA_ID_MS_DECO_MASK (0xfull << CHA_ID_MS_DECO_SHIFT)
-#define CHA_ID_JR_SHIFT 60
-#define CHA_ID_JR_MASK (0xfull << CHA_ID_JR_SHIFT)
+#define CHA_ID_MS_JR_SHIFT 28
+#define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT)
struct sec_vid {
u16 ip_id;
@@ -172,10 +188,14 @@ struct caam_perfmon {
u64 rsvd[13];
/* CAAM Hardware Instantiation Parameters fa0-fbf */
- u64 cha_rev; /* CRNR - CHA Revision Number */
-#define CTPR_QI_SHIFT 57
-#define CTPR_QI_MASK (0x1ull << CTPR_QI_SHIFT)
- u64 comp_parms; /* CTPR - Compile Parameters Register */
+ u32 cha_rev_ms; /* CRNR - CHA Rev No. Most significant half*/
+ u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/
+#define CTPR_MS_QI_SHIFT 25
+#define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
+#define CTPR_MS_VIRT_EN_INCL 0x00000001
+#define CTPR_MS_VIRT_EN_POR 0x00000002
+ u32 comp_parms_ms; /* CTPR - Compile Parameters Register */
+ u32 comp_parms_ls; /* CTPR - Compile Parameters Register */
u64 rsvd1[2];
/* CAAM Global Status fc0-fdf */
@@ -189,9 +209,12 @@ struct caam_perfmon {
/* Component Instantiation Parameters fe0-fff */
u32 rtic_id; /* RVID - RTIC Version ID */
u32 ccb_id; /* CCBVID - CCB Version ID */
- u64 cha_id; /* CHAVID - CHA Version ID */
- u64 cha_num; /* CHANUM - CHA Number */
- u64 caam_id; /* CAAMVID - CAAM Version ID */
+ u32 cha_id_ms; /* CHAVID - CHA Version ID Most Significant*/
+ u32 cha_id_ls; /* CHAVID - CHA Version ID Least Significant*/
+ u32 cha_num_ms; /* CHANUM - CHA Number Most Significant */
+ u32 cha_num_ls; /* CHANUM - CHA Number Least Significant*/
+ u32 caam_id_ms; /* CAAMVID - CAAM Version ID MS */
+ u32 caam_id_ls; /* CAAMVID - CAAM Version ID LS */
};
/* LIODN programming for DMA configuration */
@@ -304,9 +327,12 @@ struct caam_ctrl {
/* Bus Access Configuration Section 010-11f */
/* Read/Writable */
struct masterid jr_mid[4]; /* JRxLIODNR - JobR LIODN setup */
- u32 rsvd3[12];
+ u32 rsvd3[11];
+ u32 jrstart; /* JRSTART - Job Ring Start Register */
struct masterid rtic_mid[4]; /* RTICxLIODNR - RTIC LIODN setup */
- u32 rsvd4[7];
+ u32 rsvd4[5];
+ u32 deco_rsr; /* DECORSR - Deco Request Source */
+ u32 rsvd11;
u32 deco_rq; /* DECORR - DECO Request */
struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */
u32 rsvd5[22];
@@ -347,7 +373,10 @@ struct caam_ctrl {
#define MCFGR_DMA_RESET 0x10000000
#define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */
#define SCFGR_RDBENABLE 0x00000400
+#define SCFGR_VIRT_EN 0x00008000
#define DECORR_RQD0ENABLE 0x00000001 /* Enable DECO0 for direct access */
+#define DECORSR_JR0 0x00000001 /* JR to supply TZ, SDID, ICID */
+#define DECORSR_VALID 0x80000000
#define DECORR_DEN0 0x00010000 /* DECO0 available for access*/
/* AXI read cache control */
@@ -365,6 +394,12 @@ struct caam_ctrl {
#define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */
#define MCFGR_BURST_64 0x00000001 /* Max burst size */
+/* JRSTART register offsets */
+#define JRSTART_JR0_START 0x00000001 /* Start Job ring 0 */
+#define JRSTART_JR1_START 0x00000002 /* Start Job ring 1 */
+#define JRSTART_JR2_START 0x00000004 /* Start Job ring 2 */
+#define JRSTART_JR3_START 0x00000008 /* Start Job ring 3 */
+
/*
* caam_job_ring - direct job ring setup
* 1-4 possible per instantiation, base + 1000/2000/3000/4000