Diffstat (limited to 'drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c')
-rw-r--r--  drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c  124
1 file changed, 62 insertions(+), 62 deletions(-)
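
This commit is a mechanical conversion: Lustre's private ll_crypto_* aliases (compatibility shims carried over from the out-of-tree client) are replaced with the kernel's native synchronous crypto API, a direct #include <linux/crypto.h> is added, and the ENTRY/RETURN debug macros in krb5_encrypt_rawobjs() are dropped in favour of plain returns. The renames are one-to-one:

    ll_crypto_alloc_blkcipher()    ->  crypto_alloc_blkcipher()
    ll_crypto_blkcipher_setkey()   ->  crypto_blkcipher_setkey()
    ll_crypto_blkcipher_*crypt*()  ->  crypto_blkcipher_*crypt*()
    ll_crypto_free_blkcipher()     ->  crypto_free_blkcipher()
    ll_crypto_hash_*()             ->  crypto_hash_*()
    struct ll_crypto_cipher        ->  struct crypto_blkcipher
    struct ll_crypto_hash          ->  struct crypto_hash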
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
index 4b28931bbc96..c106a9e049a7 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
@@ -54,6 +54,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
+#include <linux/crypto.h>
#include <obd.h>
#include <obd_class.h>
@@ -147,14 +148,14 @@ static const char * enctype2str(__u32 enctype)
static
int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
{
- kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
+ kb->kb_tfm = crypto_alloc_blkcipher(alg_name, alg_mode, 0);
if (IS_ERR(kb->kb_tfm)) {
CERROR("failed to alloc tfm: %s, mode %d\n",
alg_name, alg_mode);
return -1;
}
- if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
+ if (crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
CERROR("failed to set %s key, len %d\n",
alg_name, kb->kb_key.len);
return -1;
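
For context, a minimal sketch of the legacy synchronous blkcipher lifecycle that keyblock_init()/keyblock_free() now call directly. This targets kernels of this era (crypto_blkcipher was later superseded by the skcipher API); the "cbc(aes)" name and the demo_* helper are illustrative, not part of the patch:

    #include <linux/crypto.h>
    #include <linux/err.h>

    static struct crypto_blkcipher *demo_alloc_cbc_aes(const u8 *key,
                                                       unsigned int keylen)
    {
            struct crypto_blkcipher *tfm;

            /* "cbc(aes)" selects the AES cipher wrapped in CBC mode */
            tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return tfm;             /* caller checks with IS_ERR() */

            if (crypto_blkcipher_setkey(tfm, key, keylen)) {
                    crypto_free_blkcipher(tfm);
                    return ERR_PTR(-EINVAL);
            }
            return tfm;     /* release with crypto_free_blkcipher() when done */
    }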
@@ -197,7 +198,7 @@ void keyblock_free(struct krb5_keyblock *kb)
{
rawobj_free(&kb->kb_key);
if (kb->kb_tfm)
- ll_crypto_free_blkcipher(kb->kb_tfm);
+ crypto_free_blkcipher(kb->kb_tfm);
}
static
@@ -341,7 +342,7 @@ __u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
if (p != end)
goto out_err;
- CDEBUG(D_SEC, "succesfully imported rfc1964 context\n");
+ CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
return 0;
out_err:
return GSS_S_FAILURE;
@@ -403,7 +404,7 @@ __u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
goto out_err;
- CDEBUG(D_SEC, "succesfully imported v2 context\n");
+ CDEBUG(D_SEC, "successfully imported v2 context\n");
return 0;
out_err:
return GSS_S_FAILURE;
@@ -494,7 +495,7 @@ __u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
goto out_err;
gctx_new->internal_ctx_id = knew;
- CDEBUG(D_SEC, "succesfully copied reverse context\n");
+ CDEBUG(D_SEC, "successfully copied reverse context\n");
return GSS_S_COMPLETE;
out_err:
@@ -529,7 +530,7 @@ void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
}
static
-__u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
+__u32 krb5_encrypt(struct crypto_blkcipher *tfm,
int decrypt,
void * iv,
void * in,
@@ -546,27 +547,27 @@ __u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
desc.info = local_iv;
desc.flags= 0;
- if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) {
+ if (length % crypto_blkcipher_blocksize(tfm) != 0) {
CERROR("output length %d mismatch blocksize %d\n",
- length, ll_crypto_blkcipher_blocksize(tfm));
+ length, crypto_blkcipher_blocksize(tfm));
goto out;
}
- if (ll_crypto_blkcipher_ivsize(tfm) > 16) {
- CERROR("iv size too large %d\n", ll_crypto_blkcipher_ivsize(tfm));
+ if (crypto_blkcipher_ivsize(tfm) > 16) {
+ CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm));
goto out;
}
if (iv)
- memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm));
+ memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
memcpy(out, in, length);
buf_to_sg(&sg, out, length);
if (decrypt)
- ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
+ ret = crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
else
- ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
+ ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
out:
return(ret);
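
krb5_encrypt() above drives the cipher through a struct blkcipher_desc; here is a self-contained sketch of the same in-place encrypt-with-IV pattern under that legacy API (the demo_* helper is hypothetical; as in the function above, the buffer must be a whole number of cipher blocks):

    #include <linux/crypto.h>
    #include <linux/scatterlist.h>

    static int demo_cbc_encrypt_inplace(struct crypto_blkcipher *tfm,
                                        void *buf, unsigned int len, u8 *iv)
    {
            struct blkcipher_desc desc = { .tfm = tfm, .info = iv, .flags = 0 };
            struct scatterlist sg;

            if (len % crypto_blkcipher_blocksize(tfm))
                    return -EINVAL;         /* CBC needs whole blocks */

            sg_init_one(&sg, buf, len);
            /* desc.info supplies the IV and is updated as blocks are chained */
            return crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, len);
    }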
@@ -574,7 +575,7 @@ out:
static inline
-int krb5_digest_hmac(struct ll_crypto_hash *tfm,
+int krb5_digest_hmac(struct crypto_hash *tfm,
rawobj_t *key,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
@@ -585,17 +586,17 @@ int krb5_digest_hmac(struct ll_crypto_hash *tfm,
struct scatterlist sg[1];
int i;
- ll_crypto_hash_setkey(tfm, key->data, key->len);
+ crypto_hash_setkey(tfm, key->data, key->len);
desc.tfm = tfm;
desc.flags= 0;
- ll_crypto_hash_init(&desc);
+ crypto_hash_init(&desc);
for (i = 0; i < msgcnt; i++) {
if (msgs[i].len == 0)
continue;
buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
- ll_crypto_hash_update(&desc, sg, msgs[i].len);
+ crypto_hash_update(&desc, sg, msgs[i].len);
}
for (i = 0; i < iovcnt; i++) {
@@ -604,20 +605,20 @@ int krb5_digest_hmac(struct ll_crypto_hash *tfm,
sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
iovs[i].kiov_offset);
- ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
+ crypto_hash_update(&desc, sg, iovs[i].kiov_len);
}
if (khdr) {
buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
- ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
+ crypto_hash_update(&desc, sg, sizeof(*khdr));
}
- return ll_crypto_hash_final(&desc, cksum->data);
+ return crypto_hash_final(&desc, cksum->data);
}
static inline
-int krb5_digest_norm(struct ll_crypto_hash *tfm,
+int krb5_digest_norm(struct crypto_hash *tfm,
struct krb5_keyblock *kb,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
@@ -632,13 +633,13 @@ int krb5_digest_norm(struct ll_crypto_hash *tfm,
desc.tfm = tfm;
desc.flags= 0;
- ll_crypto_hash_init(&desc);
+ crypto_hash_init(&desc);
for (i = 0; i < msgcnt; i++) {
if (msgs[i].len == 0)
continue;
buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
- ll_crypto_hash_update(&desc, sg, msgs[i].len);
+ crypto_hash_update(&desc, sg, msgs[i].len);
}
for (i = 0; i < iovcnt; i++) {
@@ -647,15 +648,15 @@ int krb5_digest_norm(struct ll_crypto_hash *tfm,
sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
iovs[i].kiov_offset);
- ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
+ crypto_hash_update(&desc, sg, iovs[i].kiov_len);
}
if (khdr) {
buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
- ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
+ crypto_hash_update(&desc, sg, sizeof(*khdr));
}
- ll_crypto_hash_final(&desc, cksum->data);
+ crypto_hash_final(&desc, cksum->data);
return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
cksum->data, cksum->len);
@@ -674,7 +675,7 @@ __s32 krb5_make_checksum(__u32 enctype,
rawobj_t *cksum)
{
struct krb5_enctype *ke = &enctypes[enctype];
- struct ll_crypto_hash *tfm;
+ struct crypto_hash *tfm;
__u32 code = GSS_S_FAILURE;
int rc;
@@ -683,7 +684,7 @@ __s32 krb5_make_checksum(__u32 enctype,
return GSS_S_FAILURE;
}
- cksum->len = ll_crypto_hash_digestsize(tfm);
+ cksum->len = crypto_hash_digestsize(tfm);
OBD_ALLOC_LARGE(cksum->data, cksum->len);
if (!cksum->data) {
cksum->len = 0;
@@ -700,7 +701,7 @@ __s32 krb5_make_checksum(__u32 enctype,
if (rc == 0)
code = GSS_S_COMPLETE;
out_tfm:
- ll_crypto_free_hash(tfm);
+ crypto_free_hash(tfm);
return code;
}
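
The checksum side uses the legacy crypto_hash API in the same way; a minimal HMAC sketch under that API ("hmac(sha1)" and the demo_* name are illustrative — note that krb5_make_checksum() above sizes cksum->len with crypto_hash_digestsize() exactly as the comment here assumes):

    #include <linux/crypto.h>
    #include <linux/scatterlist.h>
    #include <linux/err.h>

    static int demo_hmac_sha1(const u8 *key, unsigned int keylen,
                              const void *data, unsigned int len, u8 *out)
    {
            struct crypto_hash *tfm;
            struct hash_desc desc;
            struct scatterlist sg;
            int rc;

            tfm = crypto_alloc_hash("hmac(sha1)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            desc.tfm = tfm;
            desc.flags = 0;
            sg_init_one(&sg, data, len);

            rc = crypto_hash_setkey(tfm, key, keylen);
            if (!rc)
                    rc = crypto_hash_init(&desc);
            if (!rc)
                    rc = crypto_hash_update(&desc, &sg, len);
            if (!rc)
                    /* out must hold crypto_hash_digestsize(tfm) bytes */
                    rc = crypto_hash_final(&desc, out);

            crypto_free_hash(tfm);
            return rc;
    }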
@@ -878,7 +879,7 @@ int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
}
static
-int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
+int krb5_encrypt_rawobjs(struct crypto_blkcipher *tfm,
int mode_ecb,
int inobj_cnt,
rawobj_t *inobjs,
@@ -890,7 +891,6 @@ int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
__u8 local_iv[16] = {0}, *buf;
__u32 datalen = 0;
int i, rc;
- ENTRY;
buf = outobj->data;
desc.tfm = tfm;
@@ -905,23 +905,23 @@ int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
if (mode_ecb) {
if (enc)
- rc = ll_crypto_blkcipher_encrypt(
+ rc = crypto_blkcipher_encrypt(
&desc, &dst, &src, src.length);
else
- rc = ll_crypto_blkcipher_decrypt(
+ rc = crypto_blkcipher_decrypt(
&desc, &dst, &src, src.length);
} else {
if (enc)
- rc = ll_crypto_blkcipher_encrypt_iv(
+ rc = crypto_blkcipher_encrypt_iv(
&desc, &dst, &src, src.length);
else
- rc = ll_crypto_blkcipher_decrypt_iv(
+ rc = crypto_blkcipher_decrypt_iv(
&desc, &dst, &src, src.length);
}
if (rc) {
CERROR("encrypt error %d\n", rc);
- RETURN(rc);
+ return rc;
}
datalen += inobjs[i].len;
@@ -929,14 +929,14 @@ int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
}
outobj->len = datalen;
- RETURN(0);
+ return 0;
}
/*
* if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
*/
static
-int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
+int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
struct krb5_header *khdr,
char *confounder,
struct ptlrpc_bulk_desc *desc,
@@ -951,7 +951,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
LASSERT(desc->bd_iov_count);
LASSERT(desc->bd_enc_iov);
- blocksize = ll_crypto_blkcipher_blocksize(tfm);
+ blocksize = crypto_blkcipher_blocksize(tfm);
LASSERT(blocksize > 1);
LASSERT(cipher->len == blocksize + sizeof(*khdr));
@@ -963,7 +963,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
buf_to_sg(&src, confounder, blocksize);
buf_to_sg(&dst, cipher->data, blocksize);
- rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
if (rc) {
CERROR("error to encrypt confounder: %d\n", rc);
return rc;
@@ -983,7 +983,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
desc->bd_enc_iov[i].kiov_offset = dst.offset;
desc->bd_enc_iov[i].kiov_len = dst.length;
- rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
src.length);
if (rc) {
CERROR("error to encrypt page: %d\n", rc);
@@ -995,7 +995,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
buf_to_sg(&src, khdr, sizeof(*khdr));
buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
- rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc,
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc,
&dst, &src, sizeof(*khdr));
if (rc) {
CERROR("error to encrypt krb5 header: %d\n", rc);
@@ -1025,7 +1025,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
* should have been done by prep_bulk().
*/
static
-int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
+int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
struct krb5_header *khdr,
struct ptlrpc_bulk_desc *desc,
rawobj_t *cipher,
@@ -1042,7 +1042,7 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
LASSERT(desc->bd_enc_iov);
LASSERT(desc->bd_nob_transferred);
- blocksize = ll_crypto_blkcipher_blocksize(tfm);
+ blocksize = crypto_blkcipher_blocksize(tfm);
LASSERT(blocksize > 1);
LASSERT(cipher->len == blocksize + sizeof(*khdr));
@@ -1059,7 +1059,7 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
buf_to_sg(&src, cipher->data, blocksize);
buf_to_sg(&dst, plain->data, blocksize);
- rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
if (rc) {
CERROR("error to decrypt confounder: %d\n", rc);
return rc;
@@ -1102,7 +1102,7 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
if (desc->bd_iov[i].kiov_len % blocksize == 0)
sg_assign_page(&dst, desc->bd_iov[i].kiov_page);
- rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
src.length);
if (rc) {
CERROR("error to decrypt page: %d\n", rc);
@@ -1142,7 +1142,7 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
- rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc,
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc,
&dst, &src, sizeof(*khdr));
if (rc) {
CERROR("error to decrypt tail: %d\n", rc);
@@ -1177,7 +1177,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
LASSERT(kctx->kc_keye.kb_tfm == NULL ||
ke->ke_conf_size >=
- ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
+ crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
/*
* final token format:
@@ -1201,7 +1201,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
blocksize = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
LASSERT(blocksize <= ke->ke_conf_size);
@@ -1248,7 +1248,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
rawobj_t arc4_keye;
- struct ll_crypto_cipher *arc4_tfm;
+ struct crypto_blkcipher *arc4_tfm;
if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
@@ -1256,13 +1256,13 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
GOTO(arc4_out, rc = -EACCES);
}
- arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+ arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
if (IS_ERR(arc4_tfm)) {
CERROR("failed to alloc tfm arc4 in ECB mode\n");
GOTO(arc4_out_key, rc = -EACCES);
}
- if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
+ if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
arc4_keye.len)) {
CERROR("failed to set arc4 key, len %d\n",
arc4_keye.len);
@@ -1272,7 +1272,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
3, data_desc, &cipher, 1);
arc4_out_tfm:
- ll_crypto_free_blkcipher(arc4_tfm);
+ crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
rawobj_free(&arc4_keye);
arc4_out:
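
The ARCFOUR branch allocates a throwaway ecb(arc4) tfm keyed with the per-message key derived by krb5_make_checksum(). RC4 is a stream cipher, so the ECB wrapping just means no chaining and no IV, and encryption and decryption are the same operation; a minimal sketch of that pattern (the demo_* name and one-shot structure are illustrative):

    #include <linux/crypto.h>
    #include <linux/scatterlist.h>
    #include <linux/err.h>

    static int demo_arc4_crypt(const u8 *key, unsigned int keylen,
                               void *buf, unsigned int len)
    {
            struct crypto_blkcipher *tfm;
            struct blkcipher_desc desc;
            struct scatterlist sg;
            int rc;

            tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            rc = crypto_blkcipher_setkey(tfm, key, keylen);
            if (!rc) {
                    desc.tfm = tfm;
                    desc.flags = 0;
                    sg_init_one(&sg, buf, len);
                    /* encrypt == decrypt for a stream cipher */
                    rc = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
            }

            crypto_free_blkcipher(tfm);
            return rc;
    }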
@@ -1310,7 +1310,7 @@ __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
LASSERT(desc->bd_enc_iov);
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
for (i = 0; i < desc->bd_iov_count; i++) {
LASSERT(desc->bd_enc_iov[i].kiov_page);
@@ -1371,7 +1371,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
blocksize = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
/*
@@ -1481,7 +1481,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
blocksize = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
/* expected token layout:
@@ -1521,7 +1521,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
rawobj_t arc4_keye;
- struct ll_crypto_cipher *arc4_tfm;
+ struct crypto_blkcipher *arc4_tfm;
cksum.data = token->data + token->len - ke->ke_hash_size;
cksum.len = ke->ke_hash_size;
@@ -1532,13 +1532,13 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
GOTO(arc4_out, rc = -EACCES);
}
- arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+ arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
if (IS_ERR(arc4_tfm)) {
CERROR("failed to alloc tfm arc4 in ECB mode\n");
GOTO(arc4_out_key, rc = -EACCES);
}
- if (ll_crypto_blkcipher_setkey(arc4_tfm,
+ if (crypto_blkcipher_setkey(arc4_tfm,
arc4_keye.data, arc4_keye.len)) {
CERROR("failed to set arc4 key, len %d\n",
arc4_keye.len);
@@ -1548,7 +1548,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
1, &cipher_in, &plain_out, 0);
arc4_out_tfm:
- ll_crypto_free_blkcipher(arc4_tfm);
+ crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
rawobj_free(&arc4_keye);
arc4_out:
@@ -1647,7 +1647,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
LBUG();
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);