author    Jason A. Donenfeld <Jason@zx2c4.com>  2018-08-07 00:31:24 +0200
committer Jason A. Donenfeld <Jason@zx2c4.com>  2020-02-19 21:48:13 +0100
commit    118398c1c6540d44a070266cf3a39c5077775c8e (patch)
tree      30e308280549ba97f4763ee52a7ca088556f5a1c
parent    compat: RHEL 7 backported skb_ensure_writable() (diff)
download  wireguard-linux-compat-118398c1c6540d44a070266cf3a39c5077775c8e.tar.xz
          wireguard-linux-compat-118398c1c6540d44a070266cf3a39c5077775c8e.zip
curve25519-x86_64: avoid use of r12
This causes problems with RAP and KERNEXEC for PaX, as r12 is a
reserved register. It also leads to a more compact instruction
encoding, saving about 100 cycles.

Suggested-by: PaX Team <pageexec@freemail.hu>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
-rw-r--r--  src/crypto/zinc/curve25519/curve25519-x86_64.c  |  110
1 file changed, 55 insertions(+), 55 deletions(-)
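The change is mechanical: every use of %r12 as a MULX destination or as
ADCX/ADOX scratch becomes %rbx, and %r12 is replaced by %rbx in each asm
clobber list so the compiler knows the callee-saved register gets
trashed. A minimal standalone sketch of the same pattern follows; it is
not part of the patch, the function name mul_lo_accum_hi is hypothetical,
and running it requires an x86_64 CPU with BMI2 (for MULX):

/* Sketch of the %r12 -> %rbx substitution used throughout the patch:
 * MULX writes its high half into %rbx instead of %r12, and %rbx sits
 * in the clobber list so GCC saves/restores it around the function.
 * Build with: gcc -O2 -o demo demo.c
 */
#include <stdio.h>

typedef unsigned long long u64;

/* Returns the low 64 bits of a*b and accumulates the high 64 bits
 * into *hi, with %rbx as the scratch register for the high half. */
static inline u64 mul_lo_accum_hi(u64 a, u64 b, u64 *hi)
{
	u64 lo;
	asm volatile(
		"movq %2, %%rdx;"       /* MULX multiplies implicitly by rdx */
		"mulxq %3, %0, %%rbx;"  /* lo <- low half, rbx <- high half */
		"addq %%rbx, %1;"       /* fold high half into the accumulator */
		: "=&r" (lo), "+r" (*hi)
		: "r" (a), "r" (b)
		: "%rdx", "%rbx", "cc");
	return lo;
}

int main(void)
{
	u64 hi = 0;
	u64 lo = mul_lo_accum_hi(0xffffffffffffffffULL, 2, &hi);
	printf("lo=%llx hi=%llx\n", lo, hi); /* lo=fffffffffffffffe hi=1 */
	return 0;
}

As in the patch, %rbx is declared as a clobber rather than saved by hand,
so the compiler emits the push/pop of rbx only where it is actually needed.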
diff --git a/src/crypto/zinc/curve25519/curve25519-x86_64.c b/src/crypto/zinc/curve25519/curve25519-x86_64.c
index 74570cb..79716c4 100644
--- a/src/crypto/zinc/curve25519/curve25519-x86_64.c
+++ b/src/crypto/zinc/curve25519/curve25519-x86_64.c
@@ -156,28 +156,28 @@ static inline void fmul(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
" movq 0(%1), %%rdx;"
" mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 0(%0);"
" mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 8(%0);"
- " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;"
+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;"
" mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;"
" adox %%rdx, %%rax;"
/* Compute src1[1] * src2 */
" movq 8(%1), %%rdx;"
" mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);"
- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 16(%0);"
- " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " mov $0, %%r8;"
+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 16(%0);"
+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
" mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
" adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
/* Compute src1[2] * src2 */
" movq 16(%1), %%rdx;"
" mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);"
- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 24(%0);"
- " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " mov $0, %%r8;"
+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 24(%0);"
+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
" mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
" adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
/* Compute src1[3] * src2 */
" movq 24(%1), %%rdx;"
" mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);"
- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 32(%0);"
- " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " movq %%r12, 40(%0);" " mov $0, %%r8;"
+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 32(%0);"
+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 40(%0);" " mov $0, %%r8;"
" mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 48(%0);" " mov $0, %%rax;"
" adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 56(%0);"
/* Line up pointers */
@@ -191,11 +191,11 @@ static inline void fmul(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
" mulxq 32(%1), %%r8, %%r13;"
" xor %3, %3;"
" adoxq 0(%1), %%r8;"
- " mulxq 40(%1), %%r9, %%r12;"
+ " mulxq 40(%1), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
" adoxq 8(%1), %%r9;"
" mulxq 48(%1), %%r10, %%r13;"
- " adcx %%r12, %%r10;"
+ " adcx %%rbx, %%r10;"
" adoxq 16(%1), %%r10;"
" mulxq 56(%1), %%r11, %%rax;"
" adcx %%r13, %%r11;"
@@ -220,7 +220,7 @@ static inline void fmul(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
" movq %%r8, 0(%0);"
: "+&r" (tmp), "+&r" (f1), "+&r" (out), "+&r" (f2)
:
- : "%rax", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "memory", "cc"
+ : "%rax", "%rdx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "%r14", "memory", "cc"
);
}
@@ -237,28 +237,28 @@ static inline void fmul2(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
" movq 0(%1), %%rdx;"
" mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 0(%0);"
" mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 8(%0);"
- " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;"
+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;"
" mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;"
" adox %%rdx, %%rax;"
/* Compute src1[1] * src2 */
" movq 8(%1), %%rdx;"
" mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);"
- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 16(%0);"
- " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " mov $0, %%r8;"
+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 16(%0);"
+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
" mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
" adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
/* Compute src1[2] * src2 */
" movq 16(%1), %%rdx;"
" mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);"
- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 24(%0);"
- " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " mov $0, %%r8;"
+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 24(%0);"
+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
" mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
" adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
/* Compute src1[3] * src2 */
" movq 24(%1), %%rdx;"
" mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);"
- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 32(%0);"
- " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " movq %%r12, 40(%0);" " mov $0, %%r8;"
+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 32(%0);"
+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 40(%0);" " mov $0, %%r8;"
" mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 48(%0);" " mov $0, %%rax;"
" adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 56(%0);"
@@ -268,28 +268,28 @@ static inline void fmul2(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
" movq 32(%1), %%rdx;"
" mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 64(%0);"
" mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 72(%0);"
- " mulxq 48(%3), %%r12, %%r13;" " adox %%r11, %%r12;"
+ " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;"
" mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;"
" adox %%rdx, %%rax;"
/* Compute src1[1] * src2 */
" movq 40(%1), %%rdx;"
" mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 72(%0), %%r8;" " movq %%r8, 72(%0);"
- " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 80(%0);"
- " mulxq 48(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " mov $0, %%r8;"
+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 80(%0);"
+ " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
" mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
" adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
/* Compute src1[2] * src2 */
" movq 48(%1), %%rdx;"
" mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 80(%0), %%r8;" " movq %%r8, 80(%0);"
- " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 88(%0);"
- " mulxq 48(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " mov $0, %%r8;"
+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 88(%0);"
+ " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
" mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
" adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
/* Compute src1[3] * src2 */
" movq 56(%1), %%rdx;"
" mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 88(%0), %%r8;" " movq %%r8, 88(%0);"
- " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 96(%0);"
- " mulxq 48(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " movq %%r12, 104(%0);" " mov $0, %%r8;"
+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 96(%0);"
+ " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 104(%0);" " mov $0, %%r8;"
" mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 112(%0);" " mov $0, %%rax;"
" adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 120(%0);"
/* Line up pointers */
@@ -303,11 +303,11 @@ static inline void fmul2(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
" mulxq 32(%1), %%r8, %%r13;"
" xor %3, %3;"
" adoxq 0(%1), %%r8;"
- " mulxq 40(%1), %%r9, %%r12;"
+ " mulxq 40(%1), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
" adoxq 8(%1), %%r9;"
" mulxq 48(%1), %%r10, %%r13;"
- " adcx %%r12, %%r10;"
+ " adcx %%rbx, %%r10;"
" adoxq 16(%1), %%r10;"
" mulxq 56(%1), %%r11, %%rax;"
" adcx %%r13, %%r11;"
@@ -336,11 +336,11 @@ static inline void fmul2(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
" mulxq 96(%1), %%r8, %%r13;"
" xor %3, %3;"
" adoxq 64(%1), %%r8;"
- " mulxq 104(%1), %%r9, %%r12;"
+ " mulxq 104(%1), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
" adoxq 72(%1), %%r9;"
" mulxq 112(%1), %%r10, %%r13;"
- " adcx %%r12, %%r10;"
+ " adcx %%rbx, %%r10;"
" adoxq 80(%1), %%r10;"
" mulxq 120(%1), %%r11, %%rax;"
" adcx %%r13, %%r11;"
@@ -365,7 +365,7 @@ static inline void fmul2(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
" movq %%r8, 32(%0);"
: "+&r" (tmp), "+&r" (f1), "+&r" (out), "+&r" (f2)
:
- : "%rax", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "memory", "cc"
+ : "%rax", "%rdx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "%r14", "memory", "cc"
);
}
@@ -377,11 +377,11 @@ static inline void fmul_scalar(u64 *out, const u64 *f1, u64 f2)
asm volatile(
/* Compute the raw multiplication of f1*f2 */
" mulxq 0(%2), %%r8, %%rcx;" /* f1[0]*f2 */
- " mulxq 8(%2), %%r9, %%r12;" /* f1[1]*f2 */
+ " mulxq 8(%2), %%r9, %%rbx;" /* f1[1]*f2 */
" add %%rcx, %%r9;"
" mov $0, %%rcx;"
" mulxq 16(%2), %%r10, %%r13;" /* f1[2]*f2 */
- " adcx %%r12, %%r10;"
+ " adcx %%rbx, %%r10;"
" mulxq 24(%2), %%r11, %%rax;" /* f1[3]*f2 */
" adcx %%r13, %%r11;"
" adcx %%rcx, %%rax;"
@@ -408,7 +408,7 @@ static inline void fmul_scalar(u64 *out, const u64 *f1, u64 f2)
" movq %%r8, 0(%1);"
: "+&r" (f2_r)
: "r" (out), "r" (f1)
- : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "memory", "cc"
+ : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "memory", "cc"
);
}
@@ -509,8 +509,8 @@ static inline void fsqr(u64 *out, const u64 *f, u64 *tmp)
" mulxq 16(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */
" mulxq 24(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */
" movq 24(%1), %%rdx;" /* f[3] */
- " mulxq 8(%1), %%r11, %%r12;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */
- " mulxq 16(%1), %%rax, %%r13;" " adcx %%rax, %%r12;" /* f[2]*f[3] */
+ " mulxq 8(%1), %%r11, %%rbx;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */
+ " mulxq 16(%1), %%rax, %%r13;" " adcx %%rax, %%rbx;" /* f[2]*f[3] */
" movq 8(%1), %%rdx;" " adcx %%r15, %%r13;" /* f1 */
" mulxq 16(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */
@@ -520,12 +520,12 @@ static inline void fsqr(u64 *out, const u64 *f, u64 *tmp)
" adcx %%r8, %%r8;"
" adox %%rcx, %%r11;"
" adcx %%r9, %%r9;"
- " adox %%r15, %%r12;"
+ " adox %%r15, %%rbx;"
" adcx %%r10, %%r10;"
" adox %%r15, %%r13;"
" adcx %%r11, %%r11;"
" adox %%r15, %%r14;"
- " adcx %%r12, %%r12;"
+ " adcx %%rbx, %%rbx;"
" adcx %%r13, %%r13;"
" adcx %%r14, %%r14;"
@@ -538,7 +538,7 @@ static inline void fsqr(u64 *out, const u64 *f, u64 *tmp)
" adcx %%rcx, %%r10;" " movq %%r10, 24(%0);"
" movq 16(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */
" adcx %%rax, %%r11;" " movq %%r11, 32(%0);"
- " adcx %%rcx, %%r12;" " movq %%r12, 40(%0);"
+ " adcx %%rcx, %%rbx;" " movq %%rbx, 40(%0);"
" movq 24(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */
" adcx %%rax, %%r13;" " movq %%r13, 48(%0);"
" adcx %%rcx, %%r14;" " movq %%r14, 56(%0);"
@@ -554,11 +554,11 @@ static inline void fsqr(u64 *out, const u64 *f, u64 *tmp)
" mulxq 32(%1), %%r8, %%r13;"
" xor %%rcx, %%rcx;"
" adoxq 0(%1), %%r8;"
- " mulxq 40(%1), %%r9, %%r12;"
+ " mulxq 40(%1), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
" adoxq 8(%1), %%r9;"
" mulxq 48(%1), %%r10, %%r13;"
- " adcx %%r12, %%r10;"
+ " adcx %%rbx, %%r10;"
" adoxq 16(%1), %%r10;"
" mulxq 56(%1), %%r11, %%rax;"
" adcx %%r13, %%r11;"
@@ -583,7 +583,7 @@ static inline void fsqr(u64 *out, const u64 *f, u64 *tmp)
" movq %%r8, 0(%0);"
: "+&r" (tmp), "+&r" (f), "+&r" (out)
:
- : "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "memory", "cc"
+ : "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "%r14", "%r15", "memory", "cc"
);
}
@@ -600,8 +600,8 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
" mulxq 16(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */
" mulxq 24(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */
" movq 24(%1), %%rdx;" /* f[3] */
- " mulxq 8(%1), %%r11, %%r12;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */
- " mulxq 16(%1), %%rax, %%r13;" " adcx %%rax, %%r12;" /* f[2]*f[3] */
+ " mulxq 8(%1), %%r11, %%rbx;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */
+ " mulxq 16(%1), %%rax, %%r13;" " adcx %%rax, %%rbx;" /* f[2]*f[3] */
" movq 8(%1), %%rdx;" " adcx %%r15, %%r13;" /* f1 */
" mulxq 16(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */
@@ -611,12 +611,12 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
" adcx %%r8, %%r8;"
" adox %%rcx, %%r11;"
" adcx %%r9, %%r9;"
- " adox %%r15, %%r12;"
+ " adox %%r15, %%rbx;"
" adcx %%r10, %%r10;"
" adox %%r15, %%r13;"
" adcx %%r11, %%r11;"
" adox %%r15, %%r14;"
- " adcx %%r12, %%r12;"
+ " adcx %%rbx, %%rbx;"
" adcx %%r13, %%r13;"
" adcx %%r14, %%r14;"
@@ -629,7 +629,7 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
" adcx %%rcx, %%r10;" " movq %%r10, 24(%0);"
" movq 16(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */
" adcx %%rax, %%r11;" " movq %%r11, 32(%0);"
- " adcx %%rcx, %%r12;" " movq %%r12, 40(%0);"
+ " adcx %%rcx, %%rbx;" " movq %%rbx, 40(%0);"
" movq 24(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */
" adcx %%rax, %%r13;" " movq %%r13, 48(%0);"
" adcx %%rcx, %%r14;" " movq %%r14, 56(%0);"
@@ -640,8 +640,8 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
" mulxq 48(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */
" mulxq 56(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */
" movq 56(%1), %%rdx;" /* f[3] */
- " mulxq 40(%1), %%r11, %%r12;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */
- " mulxq 48(%1), %%rax, %%r13;" " adcx %%rax, %%r12;" /* f[2]*f[3] */
+ " mulxq 40(%1), %%r11, %%rbx;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */
+ " mulxq 48(%1), %%rax, %%r13;" " adcx %%rax, %%rbx;" /* f[2]*f[3] */
" movq 40(%1), %%rdx;" " adcx %%r15, %%r13;" /* f1 */
" mulxq 48(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */
@@ -651,12 +651,12 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
" adcx %%r8, %%r8;"
" adox %%rcx, %%r11;"
" adcx %%r9, %%r9;"
- " adox %%r15, %%r12;"
+ " adox %%r15, %%rbx;"
" adcx %%r10, %%r10;"
" adox %%r15, %%r13;"
" adcx %%r11, %%r11;"
" adox %%r15, %%r14;"
- " adcx %%r12, %%r12;"
+ " adcx %%rbx, %%rbx;"
" adcx %%r13, %%r13;"
" adcx %%r14, %%r14;"
@@ -669,7 +669,7 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
" adcx %%rcx, %%r10;" " movq %%r10, 88(%0);"
" movq 48(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */
" adcx %%rax, %%r11;" " movq %%r11, 96(%0);"
- " adcx %%rcx, %%r12;" " movq %%r12, 104(%0);"
+ " adcx %%rcx, %%rbx;" " movq %%rbx, 104(%0);"
" movq 56(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */
" adcx %%rax, %%r13;" " movq %%r13, 112(%0);"
" adcx %%rcx, %%r14;" " movq %%r14, 120(%0);"
@@ -683,11 +683,11 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
" mulxq 32(%1), %%r8, %%r13;"
" xor %%rcx, %%rcx;"
" adoxq 0(%1), %%r8;"
- " mulxq 40(%1), %%r9, %%r12;"
+ " mulxq 40(%1), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
" adoxq 8(%1), %%r9;"
" mulxq 48(%1), %%r10, %%r13;"
- " adcx %%r12, %%r10;"
+ " adcx %%rbx, %%r10;"
" adoxq 16(%1), %%r10;"
" mulxq 56(%1), %%r11, %%rax;"
" adcx %%r13, %%r11;"
@@ -716,11 +716,11 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
" mulxq 96(%1), %%r8, %%r13;"
" xor %%rcx, %%rcx;"
" adoxq 64(%1), %%r8;"
- " mulxq 104(%1), %%r9, %%r12;"
+ " mulxq 104(%1), %%r9, %%rbx;"
" adcx %%r13, %%r9;"
" adoxq 72(%1), %%r9;"
" mulxq 112(%1), %%r10, %%r13;"
- " adcx %%r12, %%r10;"
+ " adcx %%rbx, %%r10;"
" adoxq 80(%1), %%r10;"
" mulxq 120(%1), %%r11, %%rax;"
" adcx %%r13, %%r11;"
@@ -745,7 +745,7 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
" movq %%r8, 32(%0);"
: "+&r" (tmp), "+&r" (f), "+&r" (out)
:
- : "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "memory", "cc"
+ : "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "%r14", "%r15", "memory", "cc"
);
}