/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Quick'n'dirty IP checksum ...
 *
 * Copyright (C) 1998, 1999 Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#include <asm/asm.h>
#include <asm/regdef.h>

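/*
 * ADDC adds 'reg' into the running checksum 'sum' with an end-around
 * carry, as one's-complement arithmetic requires: a 32-bit add, then
 * the carry-out is folded back into the low bit (v1 is carry scratch).
 * Roughly:  sum += reg; if (sum < reg) sum++;
 */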
#define ADDC(sum,reg)						\
	addu	sum, reg;					\
	sltu	v1, sum, reg;					\
	addu	sum, v1

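/*
 * CSUM_BIGCHUNK folds one aligned 32-byte block at src + offset into
 * 'sum', eight 32-bit words at a time, using the four given scratch
 * registers for the loads.
 */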
#define CSUM_BIGCHUNK(src, offset, sum, t0, t1, t2, t3)		\
	lw	t0, (offset + 0x00)(src);			\
	lw	t1, (offset + 0x04)(src);			\
	lw	t2, (offset + 0x08)(src); 			\
	lw	t3, (offset + 0x0c)(src); 			\
	ADDC(sum, t0);						\
	ADDC(sum, t1);						\
	ADDC(sum, t2);						\
	ADDC(sum, t3);						\
	lw	t0, (offset + 0x10)(src);			\
	lw	t1, (offset + 0x14)(src);			\
	lw	t2, (offset + 0x18)(src);			\
	lw	t3, (offset + 0x1c)(src);			\
	ADDC(sum, t0);						\
	ADDC(sum, t1);						\
	ADDC(sum, t2);						\
	ADDC(sum, t3)

/*
 * a0: source address
 * a1: length of the area to checksum
 * a2: partial checksum
 *
 * The 32-bit result is returned in v0 (aliased to 'sum' below).
 */
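/*
 * A rough sketch of the assumed C-level prototype (the actual
 * declaration lives in this architecture's checksum header):
 *
 *	unsigned int csum_partial(const unsigned char *buff,
 *				  int len, unsigned int sum);
 */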

#define src a0
#define sum v0

	.text
	.set	noreorder

/* unknown src alignment and < 8 bytes to go  */
small_csumcpy:
	move	a1, ta2

	andi	ta0, a1, 4
	beqz	ta0, 1f
	 andi	ta0, a1, 2

	/* Still a full word to go  */
	ulw	ta1, (src)
	daddiu	src, 4
	ADDC(sum, ta1)

1:	move	ta1, zero
	beqz	ta0, 1f
	 andi	ta0, a1, 1

	/* Still a halfword to go  */
	ulhu	ta1, (src)
	daddiu	src, 2

1:	beqz	ta0, 1f
	 sll	ta1, ta1, 16

	lbu	ta2, (src)
	 nop

#ifdef __MIPSEB__
	sll	ta2, ta2, 8
#endif
	or	ta1, ta2

1:	ADDC(sum, ta1)

	/* Fold the 32-bit accumulator to 16 bits: add the high and low
	   halfwords, roughly sum = (sum >> 16) + (sum & 0xffff), with
	   the carry of that add wrapped back in.  */
	sll	v1, sum, 16
	addu	sum, v1
	sltu	v1, sum, v1
	srl	sum, sum, 16
	addu	sum, v1

	/* Odd buffer alignment?  If the buffer started on an odd address
	   (t3 set at entry), the bytes were summed one position out of
	   phase, so byte-swap the folded 16-bit result to compensate.  */
	beqz	t3, 1f
	 nop
	sll	v1, sum, 8
	srl	sum, sum, 8
	or	sum, v1
	andi	sum, 0xffff
1:
	.set	reorder
	/* Add the passed partial csum.  */
	ADDC(sum, a2)
	jr	ra
	.set	noreorder

/* ------------------------------------------------------------------------- */

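/*
 * csum_partial: sum a1 bytes at a0 into a one's-complement
 * accumulator, starting from the partial checksum in a2.  Buffers
 * shorter than 8 bytes go straight to small_csumcpy; otherwise src
 * is aligned step by step to a 32-byte boundary, the bulk is summed
 * in unrolled 128/64/32-byte passes, and the trailing words and
 * bytes are handled at the end.  The code runs with .set noreorder;
 * instructions indented by an extra space sit in branch delay slots.
 */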
	.align	5
LEAF(csum_partial)
	move	sum, zero
	move	t3, zero

	sltiu	t8, a1, 0x8
	bnez	t8, small_csumcpy		/* < 8 bytes to copy */
	 move	ta2, a1

	beqz	a1, out
	 andi	t3, src, 0x1			/* odd buffer? */

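/*
 * Alignment ladder: consume 1, 2, 4, 8 and 16 leading bytes as
 * needed so that src ends up 32-byte aligned before the unrolled
 * loops.  If fewer than 56 bytes remain, bail out early to
 * do_end_words instead.
 */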
hword_align:
	beqz	t3, word_align
	 andi	t8, src, 0x2

	lbu	ta0, (src)
	dsubu	a1, a1, 0x1
#ifdef __MIPSEL__
	sll	ta0, ta0, 8
#endif
	ADDC(sum, ta0)
	daddu	src, src, 0x1
	andi	t8, src, 0x2

word_align:
	beqz	t8, dword_align
	 sltiu	t8, a1, 56

	lhu	ta0, (src)
	dsubu	a1, a1, 0x2
	ADDC(sum, ta0)
	sltiu	t8, a1, 56
	daddu	src, src, 0x2

dword_align:
	bnez	t8, do_end_words
	 move	t8, a1

	andi	t8, src, 0x4
	beqz	t8, qword_align
	 andi	t8, src, 0x8

	lw	ta0, 0x00(src)
	dsubu	a1, a1, 0x4
	ADDC(sum, ta0)
	daddu	src, src, 0x4
	andi	t8, src, 0x8

qword_align:
	beqz	t8, oword_align
	 andi	t8, src, 0x10

	lw	ta0, 0x00(src)
	lw	ta1, 0x04(src)
	dsubu	a1, a1, 0x8
	ADDC(sum, ta0)
	ADDC(sum, ta1)
	daddu	src, src, 0x8
	andi	t8, src, 0x10

oword_align:
	beqz	t8, begin_movement
	 dsrl	t8, a1, 0x7

	lw	ta3, 0x08(src)
	lw	t0, 0x0c(src)
	lw	ta0, 0x00(src)
	lw	ta1, 0x04(src)
	ADDC(sum, ta3)
	ADDC(sum, t0)
	ADDC(sum, ta0)
	ADDC(sum, ta1)
	dsubu	a1, a1, 0x10
	daddu	src, src, 0x10
	dsrl	t8, a1, 0x7

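/*
 * Bulk loop: t8 = remaining length / 128 counts the full 128-byte
 * blocks handled by move_128bytes; bit 0x40 of the length then
 * selects one extra 64-byte pass, bit 0x20 one 32-byte pass, and
 * len & 0x1c gives the trailing whole words for end_words.
 */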
begin_movement:
	beqz	t8, 1f
	 andi	ta2, a1, 0x40

move_128bytes:
	CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
	CSUM_BIGCHUNK(src, 0x20, sum, ta0, ta1, ta3, t0)
	CSUM_BIGCHUNK(src, 0x40, sum, ta0, ta1, ta3, t0)
	CSUM_BIGCHUNK(src, 0x60, sum, ta0, ta1, ta3, t0)
	dsubu	t8, t8, 0x01
	bnez	t8, move_128bytes
	 daddu	src, src, 0x80

1:
	beqz	ta2, 1f
	 andi	ta2, a1, 0x20

move_64bytes:
	CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
	CSUM_BIGCHUNK(src, 0x20, sum, ta0, ta1, ta3, t0)
	daddu	src, src, 0x40

1:
	beqz	ta2, do_end_words
	 andi	t8, a1, 0x1c

move_32bytes:
	CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
	andi	t8, a1, 0x1c
	daddu	src, src, 0x20

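/*
 * Sum the remaining whole words one at a time; t8 >> 2 is the word
 * count.  Whatever is left after that (len & 0x3 bytes) is handed
 * to small_csumcpy.
 */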
do_end_words:
	beqz	t8, maybe_end_cruft
	 dsrl	t8, t8, 0x2

end_words:
	lw	ta0, (src)
	dsubu	t8, t8, 0x1
	ADDC(sum, ta0)
	bnez	t8, end_words
	 daddu	src, src, 0x4

maybe_end_cruft:
	andi	ta2, a1, 0x3

small_memcpy:
	j	small_csumcpy; move a1, ta2	/* XXX ??? */
	/* Note: the unconditional jump above makes the rest of this
	   routine, including end_bytes, unreachable.  */
	beqz	t2, out
	 move	a1, ta2

end_bytes:
	lb	ta0, (src)
	dsubu	a1, a1, 0x1
	bnez	a2, end_bytes
	 daddu	src, src, 0x1

out:
	jr	ra
	 move	v0, sum
	END(csum_partial)
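
/*
 * Illustrative use only, not part of this file: the 32-bit partial
 * sum returned here is normally reduced to the final 16-bit Internet
 * checksum by the caller, e.g. via the kernel's csum_fold() helper:
 *
 *	unsigned int partial = csum_partial(buff, len, 0);
 *	unsigned short check = csum_fold(partial);
 */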