Diffstat (limited to 'lib/libssl/src/crypto/rc4/asm')
-rw-r--r--  lib/libssl/src/crypto/rc4/asm/r4-win32.asm  | 314
-rw-r--r--  lib/libssl/src/crypto/rc4/asm/rc4-586.pl    | 173
-rw-r--r--  lib/libssl/src/crypto/rc4/asm/rx86unix.cpp  | 358
3 files changed, 845 insertions, 0 deletions
diff --git a/lib/libssl/src/crypto/rc4/asm/r4-win32.asm b/lib/libssl/src/crypto/rc4/asm/r4-win32.asm
new file mode 100644
index 00000000000..70b0f7484cd
--- /dev/null
+++ b/lib/libssl/src/crypto/rc4/asm/r4-win32.asm
@@ -0,0 +1,314 @@
+ ; Don't even think of reading this code
+ ; It was automatically generated by rc4-586.pl
+	; Which is a perl program used to generate the x86 assembler for
+	; any of elf, a.out, BSDI, Win32, or Solaris
+ ; eric <eay@cryptsoft.com>
+ ;
+ TITLE rc4-586.asm
+ .386
+.model FLAT
+_TEXT SEGMENT
+PUBLIC _RC4
+
+_RC4 PROC NEAR
+ ;
+ push ebp
+ push ebx
+ mov ebp, DWORD PTR 12[esp]
+ mov ebx, DWORD PTR 16[esp]
+ push esi
+ push edi
+ mov ecx, DWORD PTR [ebp]
+ mov edx, DWORD PTR 4[ebp]
+ mov esi, DWORD PTR 28[esp]
+ inc ecx
+ sub esp, 12
+ add ebp, 8
+ and ecx, 255
+ lea ebx, DWORD PTR [esi+ebx-8]
+ mov edi, DWORD PTR 44[esp]
+ mov DWORD PTR 8[esp],ebx
+ mov eax, DWORD PTR [ecx*4+ebp]
+ cmp ebx, esi
+ jl $L000end
+L001start:
+ add esi, 8
+ ; Round 0
+ add edx, eax
+ and edx, 255
+ inc ecx
+ mov ebx, DWORD PTR [edx*4+ebp]
+ mov DWORD PTR [ecx*4+ebp-4],ebx
+ add ebx, eax
+ and ecx, 255
+ and ebx, 255
+ mov DWORD PTR [edx*4+ebp],eax
+ nop
+ mov ebx, DWORD PTR [ebx*4+ebp]
+ mov eax, DWORD PTR [ecx*4+ebp]
+ mov BYTE PTR [esp], bl
+ ; Round 1
+ add edx, eax
+ and edx, 255
+ inc ecx
+ mov ebx, DWORD PTR [edx*4+ebp]
+ mov DWORD PTR [ecx*4+ebp-4],ebx
+ add ebx, eax
+ and ecx, 255
+ and ebx, 255
+ mov DWORD PTR [edx*4+ebp],eax
+ nop
+ mov ebx, DWORD PTR [ebx*4+ebp]
+ mov eax, DWORD PTR [ecx*4+ebp]
+ mov BYTE PTR 1[esp],bl
+ ; Round 2
+ add edx, eax
+ and edx, 255
+ inc ecx
+ mov ebx, DWORD PTR [edx*4+ebp]
+ mov DWORD PTR [ecx*4+ebp-4],ebx
+ add ebx, eax
+ and ecx, 255
+ and ebx, 255
+ mov DWORD PTR [edx*4+ebp],eax
+ nop
+ mov ebx, DWORD PTR [ebx*4+ebp]
+ mov eax, DWORD PTR [ecx*4+ebp]
+ mov BYTE PTR 2[esp],bl
+ ; Round 3
+ add edx, eax
+ and edx, 255
+ inc ecx
+ mov ebx, DWORD PTR [edx*4+ebp]
+ mov DWORD PTR [ecx*4+ebp-4],ebx
+ add ebx, eax
+ and ecx, 255
+ and ebx, 255
+ mov DWORD PTR [edx*4+ebp],eax
+ nop
+ mov ebx, DWORD PTR [ebx*4+ebp]
+ mov eax, DWORD PTR [ecx*4+ebp]
+ mov BYTE PTR 3[esp],bl
+ ; Round 4
+ add edx, eax
+ and edx, 255
+ inc ecx
+ mov ebx, DWORD PTR [edx*4+ebp]
+ mov DWORD PTR [ecx*4+ebp-4],ebx
+ add ebx, eax
+ and ecx, 255
+ and ebx, 255
+ mov DWORD PTR [edx*4+ebp],eax
+ nop
+ mov ebx, DWORD PTR [ebx*4+ebp]
+ mov eax, DWORD PTR [ecx*4+ebp]
+ mov BYTE PTR 4[esp],bl
+ ; Round 5
+ add edx, eax
+ and edx, 255
+ inc ecx
+ mov ebx, DWORD PTR [edx*4+ebp]
+ mov DWORD PTR [ecx*4+ebp-4],ebx
+ add ebx, eax
+ and ecx, 255
+ and ebx, 255
+ mov DWORD PTR [edx*4+ebp],eax
+ nop
+ mov ebx, DWORD PTR [ebx*4+ebp]
+ mov eax, DWORD PTR [ecx*4+ebp]
+ mov BYTE PTR 5[esp],bl
+ ; Round 6
+ add edx, eax
+ and edx, 255
+ inc ecx
+ mov ebx, DWORD PTR [edx*4+ebp]
+ mov DWORD PTR [ecx*4+ebp-4],ebx
+ add ebx, eax
+ and ecx, 255
+ and ebx, 255
+ mov DWORD PTR [edx*4+ebp],eax
+ nop
+ mov ebx, DWORD PTR [ebx*4+ebp]
+ mov eax, DWORD PTR [ecx*4+ebp]
+ mov BYTE PTR 6[esp],bl
+ ; Round 7
+ add edx, eax
+ and edx, 255
+ inc ecx
+ mov ebx, DWORD PTR [edx*4+ebp]
+ mov DWORD PTR [ecx*4+ebp-4],ebx
+ add ebx, eax
+ and ecx, 255
+ and ebx, 255
+ mov DWORD PTR [edx*4+ebp],eax
+ nop
+ mov ebx, DWORD PTR [ebx*4+ebp]
+ add edi, 8
+ mov BYTE PTR 7[esp],bl
+ ; apply the cipher text
+ mov eax, DWORD PTR [esp]
+ mov ebx, DWORD PTR [esi-8]
+ xor eax, ebx
+ mov ebx, DWORD PTR [esi-4]
+ mov DWORD PTR [edi-8],eax
+ mov eax, DWORD PTR 4[esp]
+ xor eax, ebx
+ mov ebx, DWORD PTR 8[esp]
+ mov DWORD PTR [edi-4],eax
+ mov eax, DWORD PTR [ecx*4+ebp]
+ cmp esi, ebx
+ jle L001start
+$L000end:
+ ; Round 0
+ add ebx, 8
+ inc esi
+ cmp ebx, esi
+ jl $L002finished
+ mov DWORD PTR 8[esp],ebx
+ add edx, eax
+ and edx, 255
+ inc ecx
+ mov ebx, DWORD PTR [edx*4+ebp]
+ mov DWORD PTR [ecx*4+ebp-4],ebx
+ add ebx, eax
+ and ecx, 255
+ and ebx, 255
+ mov DWORD PTR [edx*4+ebp],eax
+ nop
+ mov ebx, DWORD PTR [ebx*4+ebp]
+ mov eax, DWORD PTR [ecx*4+ebp]
+ mov bh, BYTE PTR [esi-1]
+ xor bl, bh
+ mov BYTE PTR [edi], bl
+ ; Round 1
+ mov ebx, DWORD PTR 8[esp]
+ cmp ebx, esi
+ jle $L002finished
+ inc esi
+ add edx, eax
+ and edx, 255
+ inc ecx
+ mov ebx, DWORD PTR [edx*4+ebp]
+ mov DWORD PTR [ecx*4+ebp-4],ebx
+ add ebx, eax
+ and ecx, 255
+ and ebx, 255
+ mov DWORD PTR [edx*4+ebp],eax
+ nop
+ mov ebx, DWORD PTR [ebx*4+ebp]
+ mov eax, DWORD PTR [ecx*4+ebp]
+ mov bh, BYTE PTR [esi-1]
+ xor bl, bh
+ mov BYTE PTR 1[edi],bl
+ ; Round 2
+ mov ebx, DWORD PTR 8[esp]
+ cmp ebx, esi
+ jle $L002finished
+ inc esi
+ add edx, eax
+ and edx, 255
+ inc ecx
+ mov ebx, DWORD PTR [edx*4+ebp]
+ mov DWORD PTR [ecx*4+ebp-4],ebx
+ add ebx, eax
+ and ecx, 255
+ and ebx, 255
+ mov DWORD PTR [edx*4+ebp],eax
+ nop
+ mov ebx, DWORD PTR [ebx*4+ebp]
+ mov eax, DWORD PTR [ecx*4+ebp]
+ mov bh, BYTE PTR [esi-1]
+ xor bl, bh
+ mov BYTE PTR 2[edi],bl
+ ; Round 3
+ mov ebx, DWORD PTR 8[esp]
+ cmp ebx, esi
+ jle $L002finished
+ inc esi
+ add edx, eax
+ and edx, 255
+ inc ecx
+ mov ebx, DWORD PTR [edx*4+ebp]
+ mov DWORD PTR [ecx*4+ebp-4],ebx
+ add ebx, eax
+ and ecx, 255
+ and ebx, 255
+ mov DWORD PTR [edx*4+ebp],eax
+ nop
+ mov ebx, DWORD PTR [ebx*4+ebp]
+ mov eax, DWORD PTR [ecx*4+ebp]
+ mov bh, BYTE PTR [esi-1]
+ xor bl, bh
+ mov BYTE PTR 3[edi],bl
+ ; Round 4
+ mov ebx, DWORD PTR 8[esp]
+ cmp ebx, esi
+ jle $L002finished
+ inc esi
+ add edx, eax
+ and edx, 255
+ inc ecx
+ mov ebx, DWORD PTR [edx*4+ebp]
+ mov DWORD PTR [ecx*4+ebp-4],ebx
+ add ebx, eax
+ and ecx, 255
+ and ebx, 255
+ mov DWORD PTR [edx*4+ebp],eax
+ nop
+ mov ebx, DWORD PTR [ebx*4+ebp]
+ mov eax, DWORD PTR [ecx*4+ebp]
+ mov bh, BYTE PTR [esi-1]
+ xor bl, bh
+ mov BYTE PTR 4[edi],bl
+ ; Round 5
+ mov ebx, DWORD PTR 8[esp]
+ cmp ebx, esi
+ jle $L002finished
+ inc esi
+ add edx, eax
+ and edx, 255
+ inc ecx
+ mov ebx, DWORD PTR [edx*4+ebp]
+ mov DWORD PTR [ecx*4+ebp-4],ebx
+ add ebx, eax
+ and ecx, 255
+ and ebx, 255
+ mov DWORD PTR [edx*4+ebp],eax
+ nop
+ mov ebx, DWORD PTR [ebx*4+ebp]
+ mov eax, DWORD PTR [ecx*4+ebp]
+ mov bh, BYTE PTR [esi-1]
+ xor bl, bh
+ mov BYTE PTR 5[edi],bl
+ ; Round 6
+ mov ebx, DWORD PTR 8[esp]
+ cmp ebx, esi
+ jle $L002finished
+ inc esi
+ add edx, eax
+ and edx, 255
+ inc ecx
+ mov ebx, DWORD PTR [edx*4+ebp]
+ mov DWORD PTR [ecx*4+ebp-4],ebx
+ add ebx, eax
+ and ecx, 255
+ and ebx, 255
+ mov DWORD PTR [edx*4+ebp],eax
+ nop
+ mov ebx, DWORD PTR [ebx*4+ebp]
+ mov bh, BYTE PTR [esi-1]
+ xor bl, bh
+ mov BYTE PTR 6[edi],bl
+$L002finished:
+ dec ecx
+ add esp, 12
+ mov DWORD PTR [ebp-4],edx
+ mov BYTE PTR [ebp-8],cl
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp
+ ret
+_RC4 ENDP
+_TEXT ENDS
+END
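Each unrolled "Round N" block in the listing above is one step of the RC4 keystream generator: advance x, fold S[x] into y, swap S[x] and S[y], and emit S[(S[x]+S[y]) & 0xff]. The following is a rough C equivalent of that per-byte round, written against an assumed key layout (two indices followed by a 256-entry table, matching the 0/4 offsets loaded through ebp and the "add ebp, 8" above); the struct and function names are illustrative and are not part of this commit.

/* Assumed layout: two indices, then the 256-entry state table. */
typedef struct {
	unsigned int x, y;
	unsigned int data[256];
} rc4_key_sketch;

/* One byte of RC4 per iteration; the assembly above unrolls eight of
 * these per pass and buffers the keystream bytes on the stack. */
static void rc4_sketch(rc4_key_sketch *key, unsigned long len,
                       const unsigned char *in, unsigned char *out)
{
	unsigned int x = key->x, y = key->y;
	unsigned int *d = key->data, tx, ty;

	while (len-- > 0) {
		x = (x + 1) & 0xff;	/* inc ecx ; and ecx, 255      */
		tx = d[x];		/* mov eax, [ecx*4+ebp]        */
		y = (y + tx) & 0xff;	/* add edx, eax ; and edx, 255 */
		ty = d[y];		/* mov ebx, [edx*4+ebp]        */
		d[x] = ty;		/* swap S[x] and S[y]          */
		d[y] = tx;		/* mov [edx*4+ebp], eax        */
		*out++ = *in++ ^ (unsigned char)d[(tx + ty) & 0xff];
	}
	key->x = x;			/* mov BYTE PTR [ebp-8], cl    */
	key->y = y;			/* mov DWORD PTR [ebp-4], edx  */
}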
diff --git a/lib/libssl/src/crypto/rc4/asm/rc4-586.pl b/lib/libssl/src/crypto/rc4/asm/rc4-586.pl
new file mode 100644
index 00000000000..0dd8eb1ba9a
--- /dev/null
+++ b/lib/libssl/src/crypto/rc4/asm/rc4-586.pl
@@ -0,0 +1,173 @@
+#!/usr/bin/perl
+
+# define for pentium pro friendly version
+
+push(@INC,"perlasm","../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],"rc4-586.pl");
+
+$tx="eax";
+$ty="ebx";
+$x="ecx";
+$y="edx";
+$in="esi";
+$out="edi";
+$d="ebp";
+
+&RC4("RC4");
+
+&asm_finish();
+
+sub RC4_loop
+ {
+ local($n,$p,$char)=@_;
+
+ &comment("Round $n");
+
+ if ($char)
+ {
+ if ($p >= 0)
+ {
+ &mov($ty, &swtmp(2));
+ &cmp($ty, $in);
+ &jle(&label("finished"));
+ &inc($in);
+ }
+ else
+ {
+ &add($ty, 8);
+ &inc($in);
+ &cmp($ty, $in);
+ &jl(&label("finished"));
+ &mov(&swtmp(2), $ty);
+ }
+ }
+ # Moved out
+ # &mov( $tx, &DWP(0,$d,$x,4)) if $p < 0;
+
+ &add( $y, $tx);
+ &and( $y, 0xff);
+ &inc( $x); # NEXT ROUND
+ &mov( $ty, &DWP(0,$d,$y,4));
+ # XXX
+ &mov( &DWP(-4,$d,$x,4),$ty); # AGI
+ &add( $ty, $tx);
+ &and( $x, 0xff); # NEXT ROUND
+ &and( $ty, 0xff);
+ &mov( &DWP(0,$d,$y,4),$tx);
+ &nop();
+ &mov( $ty, &DWP(0,$d,$ty,4));
+ &mov( $tx, &DWP(0,$d,$x,4)) if $p < 1; # NEXT ROUND
+ # XXX
+
+ if (!$char)
+ {
+ #moved up into last round
+ if ($p >= 1)
+ {
+ &add( $out, 8)
+ }
+ &movb( &BP($n,"esp","",0), &LB($ty));
+ }
+ else
+ {
+	# Note in+=8 has occurred
+ &movb( &HB($ty), &BP(-1,$in,"",0));
+ # XXX
+ &xorb(&LB($ty), &HB($ty));
+ # XXX
+ &movb(&BP($n,$out,"",0),&LB($ty));
+ }
+ }
+
+
+sub RC4
+ {
+ local($name)=@_;
+
+ &function_begin_B($name,"");
+
+ &comment("");
+
+ &push("ebp");
+ &push("ebx");
+ &mov( $d, &wparam(0)); # key
+ &mov( $ty, &wparam(1)); # num
+ &push("esi");
+ &push("edi");
+
+ &mov( $x, &DWP(0,$d,"",1));
+ &mov( $y, &DWP(4,$d,"",1));
+
+ &mov( $in, &wparam(2));
+ &inc( $x);
+
+ &stack_push(3); # 3 temp variables
+ &add( $d, 8);
+ &and( $x, 0xff);
+
+ &lea( $ty, &DWP(-8,$ty,$in));
+
+ # check for 0 length input
+
+ &mov( $out, &wparam(3));
+ &mov( &swtmp(2), $ty); # this is now address to exit at
+ &mov( $tx, &DWP(0,$d,$x,4));
+
+ &cmp( $ty, $in);
+ &jl( &label("end")); # less than 8 bytes
+
+ &set_label("start");
+
+ # filling DELAY SLOT
+ &add( $in, 8);
+
+ &RC4_loop(0,-1,0);
+ &RC4_loop(1,0,0);
+ &RC4_loop(2,0,0);
+ &RC4_loop(3,0,0);
+ &RC4_loop(4,0,0);
+ &RC4_loop(5,0,0);
+ &RC4_loop(6,0,0);
+ &RC4_loop(7,1,0);
+
+ &comment("apply the cipher text");
+ # xor the cipher data with input
+
+ #&add( $out, 8); #moved up into last round
+
+ &mov( $tx, &swtmp(0));
+ &mov( $ty, &DWP(-8,$in,"",0));
+ &xor( $tx, $ty);
+ &mov( $ty, &DWP(-4,$in,"",0));
+ &mov( &DWP(-8,$out,"",0), $tx);
+ &mov( $tx, &swtmp(1));
+ &xor( $tx, $ty);
+ &mov( $ty, &swtmp(2)); # load end ptr;
+ &mov( &DWP(-4,$out,"",0), $tx);
+ &mov( $tx, &DWP(0,$d,$x,4));
+ &cmp($in, $ty);
+ &jle(&label("start"));
+
+ &set_label("end");
+
+ # There is quite a bit of extra crap in RC4_loop() for this
+ # first round
+ &RC4_loop(0,-1,1);
+ &RC4_loop(1,0,1);
+ &RC4_loop(2,0,1);
+ &RC4_loop(3,0,1);
+ &RC4_loop(4,0,1);
+ &RC4_loop(5,0,1);
+ &RC4_loop(6,1,1);
+
+ &set_label("finished");
+ &dec( $x);
+ &stack_pop(3);
+ &mov( &DWP(-4,$d,"",0),$y);
+ &movb( &BP(-8,$d,"",0),&LB($x));
+
+ &function_end($name);
+ }
+
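The main loop that RC4() emits works on 8-byte blocks: the eight calls to RC4_loop() with $char == 0 stash one keystream byte each on the stack (swtmp(0) and swtmp(1)), the "apply the cipher text" section XORs that buffer into the input as two 32-bit words, and inputs shorter than a full block fall through to the byte-at-a-time rounds after the "end" label. A schematic C version of that blocking, with illustrative names and none of the AGI/delay-slot scheduling the generator worries about, looks like this:

/* Minimal state and single-byte round, repeated here only so the
 * sketch stands alone; names are illustrative, not from this commit. */
struct rc4_state_sketch { unsigned int x, y, d[256]; };

static unsigned char rc4_round_sketch(struct rc4_state_sketch *s)
{
	unsigned int tx, ty;

	s->x = (s->x + 1) & 0xff;
	tx = s->d[s->x];
	s->y = (s->y + tx) & 0xff;
	ty = s->d[s->y];
	s->d[s->x] = ty;
	s->d[s->y] = tx;
	return (unsigned char)s->d[(tx + ty) & 0xff];
}

/* Eight buffered rounds per pass ("Round 0".."Round 7"), XORed in as
 * two 32-bit words in "apply the cipher text", then a per-byte tail. */
static void rc4_blocks_sketch(struct rc4_state_sketch *s, unsigned long len,
                              const unsigned char *in, unsigned char *out)
{
	unsigned char ks[8];
	int i;

	while (len >= 8) {				/* "start" label       */
		for (i = 0; i < 8; i++)
			ks[i] = rc4_round_sketch(s);	/* keystream to stack  */
		for (i = 0; i < 8; i++)			/* done as two dword   */
			out[i] = in[i] ^ ks[i];		/* XORs in the asm     */
		in += 8; out += 8; len -= 8;
	}
	while (len-- > 0)				/* 0..7 leftover bytes */
		*out++ = *in++ ^ rc4_round_sketch(s);	/* after "end"         */
}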
diff --git a/lib/libssl/src/crypto/rc4/asm/rx86unix.cpp b/lib/libssl/src/crypto/rc4/asm/rx86unix.cpp
new file mode 100644
index 00000000000..ec1d72a1100
--- /dev/null
+++ b/lib/libssl/src/crypto/rc4/asm/rx86unix.cpp
@@ -0,0 +1,358 @@
+/* Run the C pre-processor over this file with one of the following defined
+ * ELF - elf object files,
+ * OUT - a.out object files,
+ * BSDI - BSDI style a.out object files
+ * SOL - Solaris style elf
+ */
+
+#define TYPE(a,b) .type a,b
+#define SIZE(a,b) .size a,b
+
+#if defined(OUT) || defined(BSDI)
+#define RC4 _RC4
+
+#endif
+
+#ifdef OUT
+#define OK 1
+#define ALIGN 4
+#endif
+
+#ifdef BSDI
+#define OK 1
+#define ALIGN 4
+#undef SIZE
+#undef TYPE
+#define SIZE(a,b)
+#define TYPE(a,b)
+#endif
+
+#if defined(ELF) || defined(SOL)
+#define OK 1
+#define ALIGN 16
+#endif
+
+#ifndef OK
+You need to define one of
+ELF - elf systems - linux-elf, NetBSD and DG-UX
+OUT - a.out systems - linux-a.out and FreeBSD
+SOL - solaris systems, which are elf with strange comment lines
+BSDI - a.out with a very primitive version of as.
+#endif
+
+/* Let the Assembler begin :-) */
+ /* Don't even think of reading this code */
+ /* It was automatically generated by rc4-586.pl */
+	/* Which is a perl program used to generate the x86 assembler for */
+	/* any of elf, a.out, BSDI, Win32, or Solaris */
+ /* eric <eay@cryptsoft.com> */
+
+ .file "rc4-586.s"
+ .version "01.01"
+gcc2_compiled.:
+.text
+ .align ALIGN
+.globl RC4
+ TYPE(RC4,@function)
+RC4:
+
+ pushl %ebp
+ pushl %ebx
+ movl 12(%esp), %ebp
+ movl 16(%esp), %ebx
+ pushl %esi
+ pushl %edi
+ movl (%ebp), %ecx
+ movl 4(%ebp), %edx
+ movl 28(%esp), %esi
+ incl %ecx
+ subl $12, %esp
+ addl $8, %ebp
+ andl $255, %ecx
+ leal -8(%ebx,%esi,), %ebx
+ movl 44(%esp), %edi
+ movl %ebx, 8(%esp)
+ movl (%ebp,%ecx,4), %eax
+ cmpl %esi, %ebx
+ jl .L000end
+.L001start:
+ addl $8, %esi
+ /* Round 0 */
+ addl %eax, %edx
+ andl $255, %edx
+ incl %ecx
+ movl (%ebp,%edx,4), %ebx
+ movl %ebx, -4(%ebp,%ecx,4)
+ addl %eax, %ebx
+ andl $255, %ecx
+ andl $255, %ebx
+ movl %eax, (%ebp,%edx,4)
+ nop
+ movl (%ebp,%ebx,4), %ebx
+ movl (%ebp,%ecx,4), %eax
+ movb %bl, (%esp)
+ /* Round 1 */
+ addl %eax, %edx
+ andl $255, %edx
+ incl %ecx
+ movl (%ebp,%edx,4), %ebx
+ movl %ebx, -4(%ebp,%ecx,4)
+ addl %eax, %ebx
+ andl $255, %ecx
+ andl $255, %ebx
+ movl %eax, (%ebp,%edx,4)
+ nop
+ movl (%ebp,%ebx,4), %ebx
+ movl (%ebp,%ecx,4), %eax
+ movb %bl, 1(%esp)
+ /* Round 2 */
+ addl %eax, %edx
+ andl $255, %edx
+ incl %ecx
+ movl (%ebp,%edx,4), %ebx
+ movl %ebx, -4(%ebp,%ecx,4)
+ addl %eax, %ebx
+ andl $255, %ecx
+ andl $255, %ebx
+ movl %eax, (%ebp,%edx,4)
+ nop
+ movl (%ebp,%ebx,4), %ebx
+ movl (%ebp,%ecx,4), %eax
+ movb %bl, 2(%esp)
+ /* Round 3 */
+ addl %eax, %edx
+ andl $255, %edx
+ incl %ecx
+ movl (%ebp,%edx,4), %ebx
+ movl %ebx, -4(%ebp,%ecx,4)
+ addl %eax, %ebx
+ andl $255, %ecx
+ andl $255, %ebx
+ movl %eax, (%ebp,%edx,4)
+ nop
+ movl (%ebp,%ebx,4), %ebx
+ movl (%ebp,%ecx,4), %eax
+ movb %bl, 3(%esp)
+ /* Round 4 */
+ addl %eax, %edx
+ andl $255, %edx
+ incl %ecx
+ movl (%ebp,%edx,4), %ebx
+ movl %ebx, -4(%ebp,%ecx,4)
+ addl %eax, %ebx
+ andl $255, %ecx
+ andl $255, %ebx
+ movl %eax, (%ebp,%edx,4)
+ nop
+ movl (%ebp,%ebx,4), %ebx
+ movl (%ebp,%ecx,4), %eax
+ movb %bl, 4(%esp)
+ /* Round 5 */
+ addl %eax, %edx
+ andl $255, %edx
+ incl %ecx
+ movl (%ebp,%edx,4), %ebx
+ movl %ebx, -4(%ebp,%ecx,4)
+ addl %eax, %ebx
+ andl $255, %ecx
+ andl $255, %ebx
+ movl %eax, (%ebp,%edx,4)
+ nop
+ movl (%ebp,%ebx,4), %ebx
+ movl (%ebp,%ecx,4), %eax
+ movb %bl, 5(%esp)
+ /* Round 6 */
+ addl %eax, %edx
+ andl $255, %edx
+ incl %ecx
+ movl (%ebp,%edx,4), %ebx
+ movl %ebx, -4(%ebp,%ecx,4)
+ addl %eax, %ebx
+ andl $255, %ecx
+ andl $255, %ebx
+ movl %eax, (%ebp,%edx,4)
+ nop
+ movl (%ebp,%ebx,4), %ebx
+ movl (%ebp,%ecx,4), %eax
+ movb %bl, 6(%esp)
+ /* Round 7 */
+ addl %eax, %edx
+ andl $255, %edx
+ incl %ecx
+ movl (%ebp,%edx,4), %ebx
+ movl %ebx, -4(%ebp,%ecx,4)
+ addl %eax, %ebx
+ andl $255, %ecx
+ andl $255, %ebx
+ movl %eax, (%ebp,%edx,4)
+ nop
+ movl (%ebp,%ebx,4), %ebx
+ addl $8, %edi
+ movb %bl, 7(%esp)
+ /* apply the cipher text */
+ movl (%esp), %eax
+ movl -8(%esi), %ebx
+ xorl %ebx, %eax
+ movl -4(%esi), %ebx
+ movl %eax, -8(%edi)
+ movl 4(%esp), %eax
+ xorl %ebx, %eax
+ movl 8(%esp), %ebx
+ movl %eax, -4(%edi)
+ movl (%ebp,%ecx,4), %eax
+ cmpl %ebx, %esi
+ jle .L001start
+.L000end:
+ /* Round 0 */
+ addl $8, %ebx
+ incl %esi
+ cmpl %esi, %ebx
+ jl .L002finished
+ movl %ebx, 8(%esp)
+ addl %eax, %edx
+ andl $255, %edx
+ incl %ecx
+ movl (%ebp,%edx,4), %ebx
+ movl %ebx, -4(%ebp,%ecx,4)
+ addl %eax, %ebx
+ andl $255, %ecx
+ andl $255, %ebx
+ movl %eax, (%ebp,%edx,4)
+ nop
+ movl (%ebp,%ebx,4), %ebx
+ movl (%ebp,%ecx,4), %eax
+ movb -1(%esi), %bh
+ xorb %bh, %bl
+ movb %bl, (%edi)
+ /* Round 1 */
+ movl 8(%esp), %ebx
+ cmpl %esi, %ebx
+ jle .L002finished
+ incl %esi
+ addl %eax, %edx
+ andl $255, %edx
+ incl %ecx
+ movl (%ebp,%edx,4), %ebx
+ movl %ebx, -4(%ebp,%ecx,4)
+ addl %eax, %ebx
+ andl $255, %ecx
+ andl $255, %ebx
+ movl %eax, (%ebp,%edx,4)
+ nop
+ movl (%ebp,%ebx,4), %ebx
+ movl (%ebp,%ecx,4), %eax
+ movb -1(%esi), %bh
+ xorb %bh, %bl
+ movb %bl, 1(%edi)
+ /* Round 2 */
+ movl 8(%esp), %ebx
+ cmpl %esi, %ebx
+ jle .L002finished
+ incl %esi
+ addl %eax, %edx
+ andl $255, %edx
+ incl %ecx
+ movl (%ebp,%edx,4), %ebx
+ movl %ebx, -4(%ebp,%ecx,4)
+ addl %eax, %ebx
+ andl $255, %ecx
+ andl $255, %ebx
+ movl %eax, (%ebp,%edx,4)
+ nop
+ movl (%ebp,%ebx,4), %ebx
+ movl (%ebp,%ecx,4), %eax
+ movb -1(%esi), %bh
+ xorb %bh, %bl
+ movb %bl, 2(%edi)
+ /* Round 3 */
+ movl 8(%esp), %ebx
+ cmpl %esi, %ebx
+ jle .L002finished
+ incl %esi
+ addl %eax, %edx
+ andl $255, %edx
+ incl %ecx
+ movl (%ebp,%edx,4), %ebx
+ movl %ebx, -4(%ebp,%ecx,4)
+ addl %eax, %ebx
+ andl $255, %ecx
+ andl $255, %ebx
+ movl %eax, (%ebp,%edx,4)
+ nop
+ movl (%ebp,%ebx,4), %ebx
+ movl (%ebp,%ecx,4), %eax
+ movb -1(%esi), %bh
+ xorb %bh, %bl
+ movb %bl, 3(%edi)
+ /* Round 4 */
+ movl 8(%esp), %ebx
+ cmpl %esi, %ebx
+ jle .L002finished
+ incl %esi
+ addl %eax, %edx
+ andl $255, %edx
+ incl %ecx
+ movl (%ebp,%edx,4), %ebx
+ movl %ebx, -4(%ebp,%ecx,4)
+ addl %eax, %ebx
+ andl $255, %ecx
+ andl $255, %ebx
+ movl %eax, (%ebp,%edx,4)
+ nop
+ movl (%ebp,%ebx,4), %ebx
+ movl (%ebp,%ecx,4), %eax
+ movb -1(%esi), %bh
+ xorb %bh, %bl
+ movb %bl, 4(%edi)
+ /* Round 5 */
+ movl 8(%esp), %ebx
+ cmpl %esi, %ebx
+ jle .L002finished
+ incl %esi
+ addl %eax, %edx
+ andl $255, %edx
+ incl %ecx
+ movl (%ebp,%edx,4), %ebx
+ movl %ebx, -4(%ebp,%ecx,4)
+ addl %eax, %ebx
+ andl $255, %ecx
+ andl $255, %ebx
+ movl %eax, (%ebp,%edx,4)
+ nop
+ movl (%ebp,%ebx,4), %ebx
+ movl (%ebp,%ecx,4), %eax
+ movb -1(%esi), %bh
+ xorb %bh, %bl
+ movb %bl, 5(%edi)
+ /* Round 6 */
+ movl 8(%esp), %ebx
+ cmpl %esi, %ebx
+ jle .L002finished
+ incl %esi
+ addl %eax, %edx
+ andl $255, %edx
+ incl %ecx
+ movl (%ebp,%edx,4), %ebx
+ movl %ebx, -4(%ebp,%ecx,4)
+ addl %eax, %ebx
+ andl $255, %ecx
+ andl $255, %ebx
+ movl %eax, (%ebp,%edx,4)
+ nop
+ movl (%ebp,%ebx,4), %ebx
+ movb -1(%esi), %bh
+ xorb %bh, %bl
+ movb %bl, 6(%edi)
+.L002finished:
+ decl %ecx
+ addl $12, %esp
+ movl %edx, -4(%ebp)
+ movb %cl, -8(%ebp)
+ popl %edi
+ popl %esi
+ popl %ebx
+ popl %ebp
+ ret
+.RC4_end:
+ SIZE(RC4,.RC4_end-RC4)
+.ident "RC4"
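As the opening comment of rx86unix.cpp says, the file is meant to be run through the C preprocessor exactly once with one of ELF, OUT, SOL, or BSDI defined; the #ifdef blocks at its top then pick the symbol naming, alignment, and .type/.size handling. A condensed view of those choices follows; the cc -E invocation in the comment is only an assumed example, not taken from the original build files.

/* Condensed from the #ifdef blocks above; e.g. something like
 * "cc -E -DELF rx86unix.cpp > rc4-586.s" (assumed command) would
 * select the ELF flavour.                                        */
#if defined(OUT) || defined(BSDI)
#define RC4 _RC4		/* a.out linkers want a leading underscore */
#define ALIGN 4			/* a.out and BSDI: 4-byte alignment        */
#endif
#if defined(ELF) || defined(SOL)
#define ALIGN 16		/* ELF and Solaris: 16-byte alignment      */
#endif
#ifdef BSDI
#define TYPE(a,b)		/* BSDI's primitive as has no .type/.size  */
#define SIZE(a,b)
#else
#define TYPE(a,b) .type a,b
#define SIZE(a,b) .size a,b
#endif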