author     Horia Geantă <horia.geanta@nxp.com>  2019-06-10 16:30:58 +0300
committer  Herbert Xu <herbert@gondor.apana.org.au>  2019-06-20 14:18:33 +0800
commit     059d73eea6409873446a858dd64a5bec9bf68b70 (patch)
tree       64bcd3c478e63f213731115d7f132d9e42cd0a0b /drivers/crypto/caam/sg_sw_qm.h
parent     hwrng: iproc-rng200 - Add support for 7211 (diff)
download   linux-dev-059d73eea6409873446a858dd64a5bec9bf68b70.tar.xz
           linux-dev-059d73eea6409873446a858dd64a5bec9bf68b70.zip
crypto: caam - use len instead of nents for building HW S/G table
Currently, conversion of the SW S/G table into the HW S/G layout relies on the nents returned by sg_nents_for_len(sg, len). However, this leaves the possibility of the HW S/G table referencing more data than needed: since the buffer length in HW S/G entries is filled in using sg_dma_len(sg), the last entry in the HW S/G table might have a length that is bigger than needed for the crypto request.

This way of S/G table conversion is fine, unless more entries have to be appended to the HW S/G table after converting it. In that case, the crypto engine would access data from the S/G entry with the incorrect length, instead of advancing in the S/G table. This situation doesn't exist today, but the upcoming implementation of IV update for skcipher algorithms needs to add an S/G entry after the req->dst S/G (corresponding to the output IV).

Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
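To make the behaviour concrete, below is a minimal standalone C sketch, not kernel code: struct seg, struct hw_ent and fill_hw_sg() are hypothetical stand-ins for the DMA-mapped scatterlist, the CAAM HW S/G entry and the conversion done by sg_to_qm_sg() in the diff further down. It shows how clamping each entry to the remaining request length keeps the last HW S/G entry from covering more data than the request needs.

/*
 * Standalone illustration only (not kernel code); all names are hypothetical.
 */
#include <stdio.h>

struct seg { unsigned long addr; unsigned int len; };    /* one DMA-mapped SW S/G segment */
struct hw_ent { unsigned long addr; unsigned int len; }; /* one HW S/G table entry */

/* Cover exactly 'len' bytes: the last entry is clamped, never copied verbatim. */
static int fill_hw_sg(const struct seg *sg, unsigned int len, struct hw_ent *out)
{
	int n = 0;

	while (len) {
		unsigned int ent_len = sg->len < len ? sg->len : len;

		out[n].addr = sg->addr;
		out[n].len = ent_len;
		n++;
		sg++;
		len -= ent_len;
	}
	return n;
}

int main(void)
{
	/* Two 64-byte segments, but the request only needs 100 bytes. */
	struct seg sw[2] = { { 0x1000, 64 }, { 0x2000, 64 } };
	struct hw_ent hw[4];
	int i, n = fill_hw_sg(sw, 100, hw);

	for (i = 0; i < n; i++)
		printf("entry %d: addr=0x%lx len=%u\n", i, hw[i].addr, hw[i].len);
	/*
	 * Prints len=64 and len=36.  An nents-based copy would have written
	 * len=64 for the second entry, so an extra entry appended right after
	 * it (e.g. an output IV) would never be reached by the engine.
	 */
	return 0;
}

The kernel patch implements the same clamp with min_t(int, sg_dma_len(sg), len), as shown in the diff below.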
Diffstat (limited to 'drivers/crypto/caam/sg_sw_qm.h')
-rw-r--r--  drivers/crypto/caam/sg_sw_qm.h | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
index b3e1aaaeffea..d56cc7efbc13 100644
--- a/drivers/crypto/caam/sg_sw_qm.h
+++ b/drivers/crypto/caam/sg_sw_qm.h
@@ -54,15 +54,19 @@ static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
  * but does not have final bit; instead, returns last entry
  */
 static inline struct qm_sg_entry *
-sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+sg_to_qm_sg(struct scatterlist *sg, int len,
 	    struct qm_sg_entry *qm_sg_ptr, u16 offset)
 {
-	while (sg_count && sg) {
-		dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
-				 sg_dma_len(sg), offset);
+	int ent_len;
+
+	while (len) {
+		ent_len = min_t(int, sg_dma_len(sg), len);
+
+		dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
+				 offset);
 		qm_sg_ptr++;
 		sg = sg_next(sg);
-		sg_count--;
+		len -= ent_len;
 	}
 	return qm_sg_ptr - 1;
 }
@@ -71,10 +75,10 @@ sg_to_qm_sg(struct scatterlist *sg, int sg_count,
  * convert scatterlist to h/w link table format
  * scatterlist must have been previously dma mapped
  */
-static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
 				    struct qm_sg_entry *qm_sg_ptr, u16 offset)
 {
-	qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+	qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
 	qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
 }
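Usage note: with this change, callers of sg_to_qm_sg() and sg_to_qm_sg_last() are expected to pass the number of bytes the HW S/G table should cover rather than the entry count returned by sg_nents_for_len(); the matching call-site updates are outside this file and not part of this diff. Because the last entry's length is now exact, an additional entry (such as the output IV entry mentioned in the commit message) can safely be appended after the converted table.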