path: root/crypto
author     David S. Miller <davem@davemloft.net>  2016-12-10 16:21:55 -0500
committer  David S. Miller <davem@davemloft.net>  2016-12-10 16:21:55 -0500
commit     821781a9f40673c2aa0f29d9d8226ec320dff20c (patch)
tree       c9d5cb8a184fff84a9d841d8cb5da4b26be5c551 /crypto
parent     net: skb_condense() can also deal with empty skbs (diff)
parent     Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6 (diff)
download   wireguard-linux-821781a9f40673c2aa0f29d9d8226ec320dff20c.tar.xz
           wireguard-linux-821781a9f40673c2aa0f29d9d8226ec320dff20c.zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'crypto')
 -rw-r--r--  crypto/algif_aead.c  59
 -rw-r--r--  crypto/mcryptd.c     19
 2 files changed, 49 insertions(+), 29 deletions(-)
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 8948392c0525..235f54d4f8a9 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -81,7 +81,11 @@ static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
- return ctx->used >= ctx->aead_assoclen + as;
+ /*
+ * The minimum amount of memory needed for an AEAD cipher is
+ * the AAD and in case of decryption the tag.
+ */
+ return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}
static void aead_reset_ctx(struct aead_ctx *ctx)
@@ -415,7 +419,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
unsigned int i, reqlen = GET_REQ_SIZE(tfm);
int err = -ENOMEM;
unsigned long used;
- size_t outlen;
+ size_t outlen = 0;
size_t usedpages = 0;
lock_sock(sk);
@@ -425,12 +429,15 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
goto unlock;
}
- used = ctx->used;
- outlen = used;
-
if (!aead_sufficient_data(ctx))
goto unlock;
+ used = ctx->used;
+ if (ctx->enc)
+ outlen = used + as;
+ else
+ outlen = used - as;
+
req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
if (unlikely(!req))
goto unlock;
@@ -444,7 +451,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
aead_request_set_ad(req, ctx->aead_assoclen);
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
aead_async_cb, sk);
- used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
+ used -= ctx->aead_assoclen;
/* take over all tx sgls from ctx */
areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur,
@@ -460,7 +467,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
areq->tsgls = sgl->cur;
/* create rx sgls */
- while (iov_iter_count(&msg->msg_iter)) {
+ while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
(outlen - usedpages));
@@ -490,16 +497,14 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
last_rsgl = rsgl;
- /* we do not need more iovecs as we have sufficient memory */
- if (outlen <= usedpages)
- break;
-
iov_iter_advance(&msg->msg_iter, err);
}
- err = -EINVAL;
+
/* ensure output buffer is sufficiently large */
- if (usedpages < outlen)
- goto free;
+ if (usedpages < outlen) {
+ err = -EINVAL;
+ goto unlock;
+ }
aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
areq->iv);
@@ -570,6 +575,7 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
goto unlock;
}
+ /* data length provided by caller via sendmsg/sendpage */
used = ctx->used;
/*
@@ -584,16 +590,27 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
if (!aead_sufficient_data(ctx))
goto unlock;
- outlen = used;
+ /*
+ * Calculate the minimum output buffer size holding the result of the
+ * cipher operation. When encrypting data, the receiving buffer is
+ * larger by the tag length compared to the input buffer as the
+ * encryption operation generates the tag. For decryption, the input
+ * buffer provides the tag which is consumed resulting in only the
+ * plaintext without a buffer for the tag returned to the caller.
+ */
+ if (ctx->enc)
+ outlen = used + as;
+ else
+ outlen = used - as;
/*
* The cipher operation input data is reduced by the associated data
* length as this data is processed separately later on.
*/
- used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
+ used -= ctx->aead_assoclen;
/* convert iovecs of output buffers into scatterlists */
- while (iov_iter_count(&msg->msg_iter)) {
+ while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
(outlen - usedpages));
@@ -620,16 +637,14 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
last_rsgl = rsgl;
- /* we do not need more iovecs as we have sufficient memory */
- if (outlen <= usedpages)
- break;
iov_iter_advance(&msg->msg_iter, err);
}
- err = -EINVAL;
/* ensure output buffer is sufficiently large */
- if (usedpages < outlen)
+ if (usedpages < outlen) {
+ err = -EINVAL;
goto unlock;
+ }
sg_mark_end(sgl->sg + sgl->cur - 1);
aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
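
Note on the algif_aead.c hunks above: they amount to three length formulas. The socket has sufficient data once it holds the AAD plus, for decryption only, the tag; the minimum output buffer is the queued input plus the tag when encrypting and minus the tag when decrypting; and the data handed to the cipher is the queued input minus only the AAD. The standalone userspace sketch below mirrors that arithmetic with made-up names (aead_lens, sufficient_data, required_outlen, cipher_inlen); it is an illustration of the accounting only, not kernel code.

/*
 * Illustrative sketch of the length accounting in the hunks above.
 * All identifiers are invented for this example.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct aead_lens {
	size_t used;      /* bytes queued via sendmsg(): AAD + PT/CT (+ tag on decrypt) */
	size_t assoclen;  /* associated data length */
	size_t authsize;  /* tag length of the AEAD transform */
	bool   enc;       /* true = encryption, false = decryption */
};

/* Enough input? The AAD always, plus the tag only when decrypting. */
static bool sufficient_data(const struct aead_lens *l)
{
	return l->used >= l->assoclen + (l->enc ? 0 : l->authsize);
}

/* Minimum output buffer the caller must supply for recvmsg(). */
static size_t required_outlen(const struct aead_lens *l)
{
	/* Encryption appends the tag; decryption strips it. */
	return l->enc ? l->used + l->authsize : l->used - l->authsize;
}

/* Cipher input length: only the AAD is processed separately. */
static size_t cipher_inlen(const struct aead_lens *l)
{
	return l->used - l->assoclen;
}

int main(void)
{
	/* 16-byte AAD, 64-byte plaintext, 16-byte tag (GCM-style). */
	struct aead_lens enc = { .used = 16 + 64, .assoclen = 16,
				 .authsize = 16, .enc = true };
	struct aead_lens dec = { .used = 16 + 64 + 16, .assoclen = 16,
				 .authsize = 16, .enc = false };

	printf("encrypt: sufficient=%d outlen=%zu inlen=%zu\n",
	       sufficient_data(&enc), required_outlen(&enc), cipher_inlen(&enc));
	printf("decrypt: sufficient=%d outlen=%zu inlen=%zu\n",
	       sufficient_data(&dec), required_outlen(&dec), cipher_inlen(&dec));
	return 0;
}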
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index 94ee44acd465..c207458d6299 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -254,18 +254,22 @@ out_free_inst:
goto out;
}
-static inline void mcryptd_check_internal(struct rtattr **tb, u32 *type,
+static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
u32 *mask)
{
struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
- return;
- if ((algt->type & CRYPTO_ALG_INTERNAL))
- *type |= CRYPTO_ALG_INTERNAL;
- if ((algt->mask & CRYPTO_ALG_INTERNAL))
- *mask |= CRYPTO_ALG_INTERNAL;
+ return false;
+
+ *type |= algt->type & CRYPTO_ALG_INTERNAL;
+ *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
+
+ if (*type & *mask & CRYPTO_ALG_INTERNAL)
+ return true;
+ else
+ return false;
}
static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
@@ -492,7 +496,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
u32 mask = 0;
int err;
- mcryptd_check_internal(tb, &type, &mask);
+ if (!mcryptd_check_internal(tb, &type, &mask))
+ return -EINVAL;
halg = ahash_attr_alg(tb[1], type, mask);
if (IS_ERR(halg))
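
Note on the mcryptd.c hunks above: mcryptd_check_internal() becomes a predicate. It still folds the CRYPTO_ALG_INTERNAL bits of the request into the type and mask, but it now reports whether the internal flag is actually set and enforced, and mcryptd_create_hash() refuses with -EINVAL otherwise, since mcryptd is only meant to wrap internal (multibuffer) algorithms. A minimal userspace sketch of that flag-validation pattern follows; the names (ALG_INTERNAL, check_internal, demo_create) are invented for illustration and the rtattr plumbing is omitted.

/*
 * Minimal sketch of the flag-validation pattern used above.
 * Invented names; not the kernel API.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ALG_INTERNAL 0x1u

static bool check_internal(uint32_t req_type, uint32_t req_mask,
			   uint32_t *type, uint32_t *mask)
{
	/* Fold only the INTERNAL bit of the request into type/mask. */
	*type |= req_type & ALG_INTERNAL;
	*mask |= req_mask & ALG_INTERNAL;

	/* Accept only when the INTERNAL bit is both requested and enforced. */
	return (*type & *mask & ALG_INTERNAL) != 0;
}

static int demo_create(uint32_t req_type, uint32_t req_mask)
{
	uint32_t type = 0, mask = 0;

	if (!check_internal(req_type, req_mask, &type, &mask))
		return -EINVAL;   /* refuse non-internal algorithms */

	/* ... instantiate the wrapped algorithm with type/mask ... */
	return 0;
}

int main(void)
{
	printf("internal request:     %d\n", demo_create(ALG_INTERNAL, ALG_INTERNAL));
	printf("non-internal request: %d\n", demo_create(0, 0));
	return 0;
}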