path: root/arch/s390/crypto/crypt_s390.h
author    Jan Glauber <jan.glauber@de.ibm.com>    2006-01-06 00:19:17 -0800
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-01-06 08:33:50 -0800
commit    c1e26e1ef7ab50f30e5fbf004fe96ed44321ca78 (patch)
tree      d4319a9441da5b776637945f9413e702296f5ad3 /arch/s390/crypto/crypt_s390.h
parent    [PATCH] s390: add oprofile callgraph support (diff)
[PATCH] s390: in-kernel crypto rename
Replace all references to z990 by s390 in the in-kernel crypto files in
arch/s390/crypto. The code is not specific to a particular machine (z990)
but to the s390 platform. Big diff, does nothing.

Signed-off-by: Jan Glauber <jan.glauber@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat
-rw-r--r--  arch/s390/crypto/crypt_s390.h (renamed from arch/s390/crypto/crypt_z990.h)  |  243
1 file changed, 126 insertions(+), 117 deletions(-)
diff --git a/arch/s390/crypto/crypt_z990.h b/arch/s390/crypto/crypt_s390.h
index 4df660b99e5a..4d24f6689755 100644
--- a/arch/s390/crypto/crypt_z990.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -1,7 +1,7 @@
/*
* Cryptographic API.
*
- * Support for z990 cryptographic instructions.
+ * Support for s390 cryptographic instructions.
*
* Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation
* Author(s): Thomas Spatzier (tspat@de.ibm.com)
@@ -12,76 +12,86 @@
* any later version.
*
*/
-#ifndef _CRYPTO_ARCH_S390_CRYPT_Z990_H
-#define _CRYPTO_ARCH_S390_CRYPT_Z990_H
+#ifndef _CRYPTO_ARCH_S390_CRYPT_S390_H
+#define _CRYPTO_ARCH_S390_CRYPT_S390_H
#include <asm/errno.h>
-#define CRYPT_Z990_OP_MASK 0xFF00
-#define CRYPT_Z990_FUNC_MASK 0x00FF
+#define CRYPT_S390_OP_MASK 0xFF00
+#define CRYPT_S390_FUNC_MASK 0x00FF
-
-/*z990 cryptographic operations*/
-enum crypt_z990_operations {
- CRYPT_Z990_KM = 0x0100,
- CRYPT_Z990_KMC = 0x0200,
- CRYPT_Z990_KIMD = 0x0300,
- CRYPT_Z990_KLMD = 0x0400,
- CRYPT_Z990_KMAC = 0x0500
+/* s390 cryptographic operations */
+enum crypt_s390_operations {
+ CRYPT_S390_KM = 0x0100,
+ CRYPT_S390_KMC = 0x0200,
+ CRYPT_S390_KIMD = 0x0300,
+ CRYPT_S390_KLMD = 0x0400,
+ CRYPT_S390_KMAC = 0x0500
};
-/*function codes for KM (CIPHER MESSAGE) instruction*/
-enum crypt_z990_km_func {
- KM_QUERY = CRYPT_Z990_KM | 0,
- KM_DEA_ENCRYPT = CRYPT_Z990_KM | 1,
- KM_DEA_DECRYPT = CRYPT_Z990_KM | 1 | 0x80, //modifier bit->decipher
- KM_TDEA_128_ENCRYPT = CRYPT_Z990_KM | 2,
- KM_TDEA_128_DECRYPT = CRYPT_Z990_KM | 2 | 0x80,
- KM_TDEA_192_ENCRYPT = CRYPT_Z990_KM | 3,
- KM_TDEA_192_DECRYPT = CRYPT_Z990_KM | 3 | 0x80,
+/* function codes for KM (CIPHER MESSAGE) instruction
+ * 0x80 is the decipher modifier bit
+ */
+enum crypt_s390_km_func {
+ KM_QUERY = CRYPT_S390_KM | 0,
+ KM_DEA_ENCRYPT = CRYPT_S390_KM | 1,
+ KM_DEA_DECRYPT = CRYPT_S390_KM | 1 | 0x80,
+ KM_TDEA_128_ENCRYPT = CRYPT_S390_KM | 2,
+ KM_TDEA_128_DECRYPT = CRYPT_S390_KM | 2 | 0x80,
+ KM_TDEA_192_ENCRYPT = CRYPT_S390_KM | 3,
+ KM_TDEA_192_DECRYPT = CRYPT_S390_KM | 3 | 0x80,
};
-/*function codes for KMC (CIPHER MESSAGE WITH CHAINING) instruction*/
-enum crypt_z990_kmc_func {
- KMC_QUERY = CRYPT_Z990_KMC | 0,
- KMC_DEA_ENCRYPT = CRYPT_Z990_KMC | 1,
- KMC_DEA_DECRYPT = CRYPT_Z990_KMC | 1 | 0x80, //modifier bit->decipher
- KMC_TDEA_128_ENCRYPT = CRYPT_Z990_KMC | 2,
- KMC_TDEA_128_DECRYPT = CRYPT_Z990_KMC | 2 | 0x80,
- KMC_TDEA_192_ENCRYPT = CRYPT_Z990_KMC | 3,
- KMC_TDEA_192_DECRYPT = CRYPT_Z990_KMC | 3 | 0x80,
+/* function codes for KMC (CIPHER MESSAGE WITH CHAINING)
+ * instruction
+ */
+enum crypt_s390_kmc_func {
+ KMC_QUERY = CRYPT_S390_KMC | 0,
+ KMC_DEA_ENCRYPT = CRYPT_S390_KMC | 1,
+ KMC_DEA_DECRYPT = CRYPT_S390_KMC | 1 | 0x80,
+ KMC_TDEA_128_ENCRYPT = CRYPT_S390_KMC | 2,
+ KMC_TDEA_128_DECRYPT = CRYPT_S390_KMC | 2 | 0x80,
+ KMC_TDEA_192_ENCRYPT = CRYPT_S390_KMC | 3,
+ KMC_TDEA_192_DECRYPT = CRYPT_S390_KMC | 3 | 0x80,
};
-/*function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) instruction*/
-enum crypt_z990_kimd_func {
- KIMD_QUERY = CRYPT_Z990_KIMD | 0,
- KIMD_SHA_1 = CRYPT_Z990_KIMD | 1,
+/* function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
+ * instruction
+ */
+enum crypt_s390_kimd_func {
+ KIMD_QUERY = CRYPT_S390_KIMD | 0,
+ KIMD_SHA_1 = CRYPT_S390_KIMD | 1,
};
-/*function codes for KLMD (COMPUTE LAST MESSAGE DIGEST) instruction*/
-enum crypt_z990_klmd_func {
- KLMD_QUERY = CRYPT_Z990_KLMD | 0,
- KLMD_SHA_1 = CRYPT_Z990_KLMD | 1,
+/* function codes for KLMD (COMPUTE LAST MESSAGE DIGEST)
+ * instruction
+ */
+enum crypt_s390_klmd_func {
+ KLMD_QUERY = CRYPT_S390_KLMD | 0,
+ KLMD_SHA_1 = CRYPT_S390_KLMD | 1,
};
-/*function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) instruction*/
-enum crypt_z990_kmac_func {
- KMAC_QUERY = CRYPT_Z990_KMAC | 0,
- KMAC_DEA = CRYPT_Z990_KMAC | 1,
- KMAC_TDEA_128 = CRYPT_Z990_KMAC | 2,
- KMAC_TDEA_192 = CRYPT_Z990_KMAC | 3
+/* function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
+ * instruction
+ */
+enum crypt_s390_kmac_func {
+ KMAC_QUERY = CRYPT_S390_KMAC | 0,
+ KMAC_DEA = CRYPT_S390_KMAC | 1,
+ KMAC_TDEA_128 = CRYPT_S390_KMAC | 2,
+ KMAC_TDEA_192 = CRYPT_S390_KMAC | 3
};
-/*status word for z990 crypto instructions' QUERY functions*/
-struct crypt_z990_query_status {
+/* status word for s390 crypto instructions' QUERY functions */
+struct crypt_s390_query_status {
u64 high;
u64 low;
};
/*
- * Standard fixup and ex_table sections for crypt_z990 inline functions.
- * label 0: the z990 crypto operation
- * label 1: just after 1 to catch illegal operation exception on non-z990
+ * Standard fixup and ex_table sections for crypt_s390 inline functions.
+ * label 0: the s390 crypto operation
+ * label 1: just after 1 to catch illegal operation exception
+ * (unsupported model)
* label 6: the return point after fixup
* label 7: set error value if exception _in_ crypto operation
* label 8: set error value if illegal operation exception
@@ -89,7 +99,7 @@ struct crypt_z990_query_status {
* [ERR] is the error code value
*/
#ifndef __s390x__
-#define __crypt_z990_fixup \
+#define __crypt_s390_fixup \
".section .fixup,\"ax\" \n" \
"7: lhi %0,%h[e1] \n" \
" bras 1,9f \n" \
@@ -106,7 +116,7 @@ struct crypt_z990_query_status {
" .long 1b,8b \n" \
".previous"
#else /* __s390x__ */
-#define __crypt_z990_fixup \
+#define __crypt_s390_fixup \
".section .fixup,\"ax\" \n" \
"7: lhi %0,%h[e1] \n" \
" jg 6b \n" \
@@ -121,22 +131,22 @@ struct crypt_z990_query_status {
#endif /* __s390x__ */
/*
- * Standard code for setting the result of z990 crypto instructions.
+ * Standard code for setting the result of s390 crypto instructions.
* %0: the register which will receive the result
* [result]: the register containing the result (e.g. second operand length
* to compute number of processed bytes].
*/
#ifndef __s390x__
-#define __crypt_z990_set_result \
+#define __crypt_s390_set_result \
" lr %0,%[result] \n"
#else /* __s390x__ */
-#define __crypt_z990_set_result \
+#define __crypt_s390_set_result \
" lgr %0,%[result] \n"
#endif
/*
- * Executes the KM (CIPHER MESSAGE) operation of the z990 CPU.
- * @param func: the function code passed to KM; see crypt_z990_km_func
+ * Executes the KM (CIPHER MESSAGE) operation of the CPU.
+ * @param func: the function code passed to KM; see crypt_s390_km_func
* @param param: address of parameter block; see POP for details on each func
* @param dest: address of destination memory area
* @param src: address of source memory area
@@ -145,9 +155,9 @@ struct crypt_z990_query_status {
* for encryption/decryption funcs
*/
static inline int
-crypt_z990_km(long func, void* param, u8* dest, const u8* src, long src_len)
+crypt_s390_km(long func, void* param, u8* dest, const u8* src, long src_len)
{
- register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK;
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void* __param asm("1") = param;
register u8* __dest asm("4") = dest;
register const u8* __src asm("2") = src;
@@ -156,26 +166,26 @@ crypt_z990_km(long func, void* param, u8* dest, const u8* src, long src_len)
ret = 0;
__asm__ __volatile__ (
- "0: .insn rre,0xB92E0000,%1,%2 \n" //KM opcode
- "1: brc 1,0b \n" //handle partial completion
- __crypt_z990_set_result
+ "0: .insn rre,0xB92E0000,%1,%2 \n" /* KM opcode */
+ "1: brc 1,0b \n" /* handle partial completion */
+ __crypt_s390_set_result
"6: \n"
- __crypt_z990_fixup
+ __crypt_s390_fixup
: "+d" (ret), "+a" (__dest), "+a" (__src),
[result] "+d" (__src_len)
: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
"a" (__param)
: "cc", "memory"
);
- if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){
+ if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
ret = src_len - ret;
}
return ret;
}
/*
- * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the z990 CPU.
- * @param func: the function code passed to KM; see crypt_z990_kmc_func
+ * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU.
+ * @param func: the function code passed to KM; see crypt_s390_kmc_func
* @param param: address of parameter block; see POP for details on each func
* @param dest: address of destination memory area
* @param src: address of source memory area
@@ -184,9 +194,9 @@ crypt_z990_km(long func, void* param, u8* dest, const u8* src, long src_len)
* for encryption/decryption funcs
*/
static inline int
-crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
+crypt_s390_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
{
- register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK;
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void* __param asm("1") = param;
register u8* __dest asm("4") = dest;
register const u8* __src asm("2") = src;
@@ -195,18 +205,18 @@ crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
ret = 0;
__asm__ __volatile__ (
- "0: .insn rre,0xB92F0000,%1,%2 \n" //KMC opcode
- "1: brc 1,0b \n" //handle partial completion
- __crypt_z990_set_result
+ "0: .insn rre,0xB92F0000,%1,%2 \n" /* KMC opcode */
+ "1: brc 1,0b \n" /* handle partial completion */
+ __crypt_s390_set_result
"6: \n"
- __crypt_z990_fixup
+ __crypt_s390_fixup
: "+d" (ret), "+a" (__dest), "+a" (__src),
[result] "+d" (__src_len)
: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
"a" (__param)
: "cc", "memory"
);
- if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){
+ if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
ret = src_len - ret;
}
return ret;
@@ -214,8 +224,8 @@ crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
/*
* Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation
- * of the z990 CPU.
- * @param func: the function code passed to KM; see crypt_z990_kimd_func
+ * of the CPU.
+ * @param func: the function code passed to KM; see crypt_s390_kimd_func
* @param param: address of parameter block; see POP for details on each func
* @param src: address of source memory area
* @param src_len: length of src operand in bytes
@@ -223,9 +233,9 @@ crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
* for digest funcs
*/
static inline int
-crypt_z990_kimd(long func, void* param, const u8* src, long src_len)
+crypt_s390_kimd(long func, void* param, const u8* src, long src_len)
{
- register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK;
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void* __param asm("1") = param;
register const u8* __src asm("2") = src;
register long __src_len asm("3") = src_len;
@@ -233,25 +243,25 @@ crypt_z990_kimd(long func, void* param, const u8* src, long src_len)
ret = 0;
__asm__ __volatile__ (
- "0: .insn rre,0xB93E0000,%1,%1 \n" //KIMD opcode
- "1: brc 1,0b \n" /*handle partical completion of kimd*/
- __crypt_z990_set_result
+ "0: .insn rre,0xB93E0000,%1,%1 \n" /* KIMD opcode */
+ "1: brc 1,0b \n" /* handle partical completion */
+ __crypt_s390_set_result
"6: \n"
- __crypt_z990_fixup
+ __crypt_s390_fixup
: "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
"a" (__param)
: "cc", "memory"
);
- if (ret >= 0 && (func & CRYPT_Z990_FUNC_MASK)){
+ if (ret >= 0 && (func & CRYPT_S390_FUNC_MASK)){
ret = src_len - ret;
}
return ret;
}
/*
- * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the z990 CPU.
- * @param func: the function code passed to KM; see crypt_z990_klmd_func
+ * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU.
+ * @param func: the function code passed to KM; see crypt_s390_klmd_func
* @param param: address of parameter block; see POP for details on each func
* @param src: address of source memory area
* @param src_len: length of src operand in bytes
@@ -259,9 +269,9 @@ crypt_z990_kimd(long func, void* param, const u8* src, long src_len)
* for digest funcs
*/
static inline int
-crypt_z990_klmd(long func, void* param, const u8* src, long src_len)
+crypt_s390_klmd(long func, void* param, const u8* src, long src_len)
{
- register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK;
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void* __param asm("1") = param;
register const u8* __src asm("2") = src;
register long __src_len asm("3") = src_len;
@@ -269,17 +279,17 @@ crypt_z990_klmd(long func, void* param, const u8* src, long src_len)
ret = 0;
__asm__ __volatile__ (
- "0: .insn rre,0xB93F0000,%1,%1 \n" //KLMD opcode
- "1: brc 1,0b \n" /*handle partical completion of klmd*/
- __crypt_z990_set_result
+ "0: .insn rre,0xB93F0000,%1,%1 \n" /* KLMD opcode */
+ "1: brc 1,0b \n" /* handle partical completion */
+ __crypt_s390_set_result
"6: \n"
- __crypt_z990_fixup
+ __crypt_s390_fixup
: "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
"a" (__param)
: "cc", "memory"
);
- if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){
+ if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
ret = src_len - ret;
}
return ret;
@@ -287,8 +297,8 @@ crypt_z990_klmd(long func, void* param, const u8* src, long src_len)
/*
* Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation
- * of the z990 CPU.
- * @param func: the function code passed to KM; see crypt_z990_klmd_func
+ * of the CPU.
+ * @param func: the function code passed to KM; see crypt_s390_klmd_func
* @param param: address of parameter block; see POP for details on each func
* @param src: address of source memory area
* @param src_len: length of src operand in bytes
@@ -296,9 +306,9 @@ crypt_z990_klmd(long func, void* param, const u8* src, long src_len)
* for digest funcs
*/
static inline int
-crypt_z990_kmac(long func, void* param, const u8* src, long src_len)
+crypt_s390_kmac(long func, void* param, const u8* src, long src_len)
{
- register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK;
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void* __param asm("1") = param;
register const u8* __src asm("2") = src;
register long __src_len asm("3") = src_len;
@@ -306,58 +316,58 @@ crypt_z990_kmac(long func, void* param, const u8* src, long src_len)
ret = 0;
__asm__ __volatile__ (
- "0: .insn rre,0xB91E0000,%5,%5 \n" //KMAC opcode
- "1: brc 1,0b \n" /*handle partical completion of klmd*/
- __crypt_z990_set_result
+ "0: .insn rre,0xB91E0000,%5,%5 \n" /* KMAC opcode */
+ "1: brc 1,0b \n" /* handle partical completion */
+ __crypt_s390_set_result
"6: \n"
- __crypt_z990_fixup
+ __crypt_s390_fixup
: "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
"a" (__param)
: "cc", "memory"
);
- if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){
+ if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
ret = src_len - ret;
}
return ret;
}
/**
- * Tests if a specific z990 crypto function is implemented on the machine.
+ * Tests if a specific crypto function is implemented on the machine.
* @param func: the function code of the specific function; 0 if op in general
* @return 1 if func available; 0 if func or op in general not available
*/
static inline int
-crypt_z990_func_available(int func)
+crypt_s390_func_available(int func)
{
int ret;
- struct crypt_z990_query_status status = {
+ struct crypt_s390_query_status status = {
.high = 0,
.low = 0
};
- switch (func & CRYPT_Z990_OP_MASK){
- case CRYPT_Z990_KM:
- ret = crypt_z990_km(KM_QUERY, &status, NULL, NULL, 0);
+ switch (func & CRYPT_S390_OP_MASK){
+ case CRYPT_S390_KM:
+ ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
break;
- case CRYPT_Z990_KMC:
- ret = crypt_z990_kmc(KMC_QUERY, &status, NULL, NULL, 0);
+ case CRYPT_S390_KMC:
+ ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
break;
- case CRYPT_Z990_KIMD:
- ret = crypt_z990_kimd(KIMD_QUERY, &status, NULL, 0);
+ case CRYPT_S390_KIMD:
+ ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
break;
- case CRYPT_Z990_KLMD:
- ret = crypt_z990_klmd(KLMD_QUERY, &status, NULL, 0);
+ case CRYPT_S390_KLMD:
+ ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
break;
- case CRYPT_Z990_KMAC:
- ret = crypt_z990_kmac(KMAC_QUERY, &status, NULL, 0);
+ case CRYPT_S390_KMAC:
+ ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
break;
default:
ret = 0;
return ret;
}
if (ret >= 0){
- func &= CRYPT_Z990_FUNC_MASK;
+ func &= CRYPT_S390_FUNC_MASK;
func &= 0x7f; //mask modifier bit
if (func < 64){
ret = (status.high >> (64 - func - 1)) & 0x1;
@@ -370,5 +380,4 @@ crypt_z990_func_available(int func)
return ret;
}
-
-#endif // _CRYPTO_ARCH_S390_CRYPT_Z990_H
+#endif // _CRYPTO_ARCH_S390_CRYPT_S390_H
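For orientation, below is a minimal caller sketch (not part of this patch) showing how the renamed interface might be used after the rename. It is a sketch under assumptions: the helper name sha1_update_example is hypothetical, the 20-byte buffer passed as the KIMD parameter block follows the SHA-1 chaining-value layout described in the Principles of Operation, and the kernel types (u8, error codes) are assumed to come in via the usual kernel headers. The return-value handling follows the comments in the header above: the query helper returns 1 if the function is available, and the kimd wrapper returns the number of processed bytes on success or a negative errno from the fixup path.

/*
 * Usage sketch only -- not from the patch above. Assumes crypt_s390.h as
 * added by this commit; sha1_update_example and the 20-byte state layout
 * are illustrative assumptions.
 */
#include <asm/errno.h>
#include "crypt_s390.h"

static int sha1_update_example(u8 *state20, const u8 *data, long len)
{
	int rc;

	/* Bail out if the machine does not implement KIMD with SHA-1. */
	if (!crypt_s390_func_available(KIMD_SHA_1))
		return -ENOSYS;

	/* len is expected to be a multiple of the 64-byte SHA-1 block size. */
	rc = crypt_s390_kimd(KIMD_SHA_1, state20, data, len);
	if (rc < 0)
		return rc;	/* -EFAULT or -ENOSYS from the fixup section */

	/* On success rc is the number of bytes actually processed. */
	return (rc == len) ? 0 : -EIO;
}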