Chromium Code Reviews

Side by Side Diff: third_party/boringssl/linux-arm/crypto/sha/sha1-armv4-large.S

Issue 377783004: Add BoringSSL GYP files. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Final Python fix. Created 6 years, 5 months ago
#if defined(__arm__)
#include "arm_arch.h"

.text

.global	sha1_block_data_order
.type	sha1_block_data_order,%function

.align	2
sha1_block_data_order:
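	@ In (AAPCS): r0 = uint32_t state[5] (the five SHA-1 chaining words,
	@ loaded below via ldmia), r1 = input data, r2 = number of 64-byte
	@ blocks (r2 is turned into an end pointer by the lsl#6 below).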
	stmdb	sp!,{r4-r12,lr}
	add	r2,r1,r2,lsl#6	@ r2 to point at the end of r1
	ldmia	r0,{r3,r4,r5,r6,r7}
.Lloop:
	ldr	r8,.LK_00_19
	mov	r14,sp
	sub	sp,sp,#15*4
	mov	r5,r5,ror#30
	mov	r6,r6,ror#30
	mov	r7,r7,ror#30	@ [6]
.L_00_15:
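	@ Rounds 0..14, five per iteration: each round fetches one big-endian
	@ message word (byte by byte on pre-ARMv7, which may fault on an
	@ unaligned ldr) and pushes it at [r14,#-4]! for the schedule.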
#if __ARM_ARCH__<7
	ldrb	r10,[r1,#2]
	ldrb	r9,[r1,#3]
	ldrb	r11,[r1,#1]
	add	r7,r8,r7,ror#2		@ E+=K_00_19
	ldrb	r12,[r1],#4
	orr	r9,r9,r10,lsl#8
	eor	r10,r5,r6		@ F_xx_xx
	orr	r9,r9,r11,lsl#16
	add	r7,r7,r3,ror#27		@ E+=ROR(A,27)
	orr	r9,r9,r12,lsl#24
#else
	ldr	r9,[r1],#4		@ handles unaligned
	add	r7,r8,r7,ror#2		@ E+=K_00_19
	eor	r10,r5,r6		@ F_xx_xx
	add	r7,r7,r3,ror#27		@ E+=ROR(A,27)
#ifdef __ARMEL__
	rev	r9,r9			@ byte swap
#endif
#endif
	and	r10,r4,r10,ror#2
	add	r7,r7,r9		@ E+=X[i]
	eor	r10,r10,r6,ror#2	@ F_00_19(B,C,D)
	str	r9,[r14,#-4]!
	add	r7,r7,r10		@ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
	ldrb	r10,[r1,#2]
	ldrb	r9,[r1,#3]
	ldrb	r11,[r1,#1]
	add	r6,r8,r6,ror#2		@ E+=K_00_19
	ldrb	r12,[r1],#4
	orr	r9,r9,r10,lsl#8
	eor	r10,r4,r5		@ F_xx_xx
	orr	r9,r9,r11,lsl#16
	add	r6,r6,r7,ror#27		@ E+=ROR(A,27)
	orr	r9,r9,r12,lsl#24
#else
	ldr	r9,[r1],#4		@ handles unaligned
	add	r6,r8,r6,ror#2		@ E+=K_00_19
	eor	r10,r4,r5		@ F_xx_xx
	add	r6,r6,r7,ror#27		@ E+=ROR(A,27)
#ifdef __ARMEL__
	rev	r9,r9			@ byte swap
#endif
#endif
	and	r10,r3,r10,ror#2
	add	r6,r6,r9		@ E+=X[i]
	eor	r10,r10,r5,ror#2	@ F_00_19(B,C,D)
	str	r9,[r14,#-4]!
	add	r6,r6,r10		@ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
	ldrb	r10,[r1,#2]
	ldrb	r9,[r1,#3]
	ldrb	r11,[r1,#1]
	add	r5,r8,r5,ror#2		@ E+=K_00_19
	ldrb	r12,[r1],#4
	orr	r9,r9,r10,lsl#8
	eor	r10,r3,r4		@ F_xx_xx
	orr	r9,r9,r11,lsl#16
	add	r5,r5,r6,ror#27		@ E+=ROR(A,27)
	orr	r9,r9,r12,lsl#24
#else
	ldr	r9,[r1],#4		@ handles unaligned
	add	r5,r8,r5,ror#2		@ E+=K_00_19
	eor	r10,r3,r4		@ F_xx_xx
	add	r5,r5,r6,ror#27		@ E+=ROR(A,27)
#ifdef __ARMEL__
	rev	r9,r9			@ byte swap
#endif
#endif
	and	r10,r7,r10,ror#2
	add	r5,r5,r9		@ E+=X[i]
	eor	r10,r10,r4,ror#2	@ F_00_19(B,C,D)
	str	r9,[r14,#-4]!
	add	r5,r5,r10		@ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
	ldrb	r10,[r1,#2]
	ldrb	r9,[r1,#3]
	ldrb	r11,[r1,#1]
	add	r4,r8,r4,ror#2		@ E+=K_00_19
	ldrb	r12,[r1],#4
	orr	r9,r9,r10,lsl#8
	eor	r10,r7,r3		@ F_xx_xx
	orr	r9,r9,r11,lsl#16
	add	r4,r4,r5,ror#27		@ E+=ROR(A,27)
	orr	r9,r9,r12,lsl#24
#else
	ldr	r9,[r1],#4		@ handles unaligned
	add	r4,r8,r4,ror#2		@ E+=K_00_19
	eor	r10,r7,r3		@ F_xx_xx
	add	r4,r4,r5,ror#27		@ E+=ROR(A,27)
#ifdef __ARMEL__
	rev	r9,r9			@ byte swap
#endif
#endif
	and	r10,r6,r10,ror#2
	add	r4,r4,r9		@ E+=X[i]
	eor	r10,r10,r3,ror#2	@ F_00_19(B,C,D)
	str	r9,[r14,#-4]!
	add	r4,r4,r10		@ E+=F_00_19(B,C,D)
#if __ARM_ARCH__<7
	ldrb	r10,[r1,#2]
	ldrb	r9,[r1,#3]
	ldrb	r11,[r1,#1]
	add	r3,r8,r3,ror#2		@ E+=K_00_19
	ldrb	r12,[r1],#4
	orr	r9,r9,r10,lsl#8
	eor	r10,r6,r7		@ F_xx_xx
	orr	r9,r9,r11,lsl#16
	add	r3,r3,r4,ror#27		@ E+=ROR(A,27)
	orr	r9,r9,r12,lsl#24
#else
	ldr	r9,[r1],#4		@ handles unaligned
	add	r3,r8,r3,ror#2		@ E+=K_00_19
	eor	r10,r6,r7		@ F_xx_xx
	add	r3,r3,r4,ror#27		@ E+=ROR(A,27)
#ifdef __ARMEL__
	rev	r9,r9			@ byte swap
#endif
#endif
	and	r10,r5,r10,ror#2
	add	r3,r3,r9		@ E+=X[i]
	eor	r10,r10,r7,ror#2	@ F_00_19(B,C,D)
	str	r9,[r14,#-4]!
	add	r3,r3,r10		@ E+=F_00_19(B,C,D)
	teq	r14,sp
	bne	.L_00_15		@ [((11+4)*5+2)*3]
	sub	sp,sp,#25*4
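	@ Round 15 (the last byte-loaded word), then rounds 16..19: from here
	@ on X[i] = ROL(X[i-3]^X[i-8]^X[i-14]^X[i-16],1), built from the
	@ stacked words; the rotate-left-by-1 is done as ror#31.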
#if __ARM_ARCH__<7
	ldrb	r10,[r1,#2]
	ldrb	r9,[r1,#3]
	ldrb	r11,[r1,#1]
	add	r7,r8,r7,ror#2		@ E+=K_00_19
	ldrb	r12,[r1],#4
	orr	r9,r9,r10,lsl#8
	eor	r10,r5,r6		@ F_xx_xx
	orr	r9,r9,r11,lsl#16
	add	r7,r7,r3,ror#27		@ E+=ROR(A,27)
	orr	r9,r9,r12,lsl#24
#else
	ldr	r9,[r1],#4		@ handles unaligned
	add	r7,r8,r7,ror#2		@ E+=K_00_19
	eor	r10,r5,r6		@ F_xx_xx
	add	r7,r7,r3,ror#27		@ E+=ROR(A,27)
#ifdef __ARMEL__
	rev	r9,r9			@ byte swap
#endif
#endif
	and	r10,r4,r10,ror#2
	add	r7,r7,r9		@ E+=X[i]
	eor	r10,r10,r6,ror#2	@ F_00_19(B,C,D)
	str	r9,[r14,#-4]!
	add	r7,r7,r10		@ E+=F_00_19(B,C,D)
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r6,r8,r6,ror#2		@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12		@ 1 cycle stall
	eor	r10,r4,r5		@ F_xx_xx
	mov	r9,r9,ror#31
	add	r6,r6,r7,ror#27		@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	and	r10,r3,r10,ror#2	@ F_xx_xx
					@ F_xx_xx
	add	r6,r6,r9		@ E+=X[i]
	eor	r10,r10,r5,ror#2	@ F_00_19(B,C,D)
	add	r6,r6,r10		@ E+=F_00_19(B,C,D)
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r5,r8,r5,ror#2		@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12		@ 1 cycle stall
	eor	r10,r3,r4		@ F_xx_xx
	mov	r9,r9,ror#31
	add	r5,r5,r6,ror#27		@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	and	r10,r7,r10,ror#2	@ F_xx_xx
					@ F_xx_xx
	add	r5,r5,r9		@ E+=X[i]
	eor	r10,r10,r4,ror#2	@ F_00_19(B,C,D)
	add	r5,r5,r10		@ E+=F_00_19(B,C,D)
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r4,r8,r4,ror#2		@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12		@ 1 cycle stall
	eor	r10,r7,r3		@ F_xx_xx
	mov	r9,r9,ror#31
	add	r4,r4,r5,ror#27		@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	and	r10,r6,r10,ror#2	@ F_xx_xx
					@ F_xx_xx
	add	r4,r4,r9		@ E+=X[i]
	eor	r10,r10,r3,ror#2	@ F_00_19(B,C,D)
	add	r4,r4,r10		@ E+=F_00_19(B,C,D)
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r3,r8,r3,ror#2		@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12		@ 1 cycle stall
	eor	r10,r6,r7		@ F_xx_xx
	mov	r9,r9,ror#31
	add	r3,r3,r4,ror#27		@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	and	r10,r5,r10,ror#2	@ F_xx_xx
					@ F_xx_xx
	add	r3,r3,r9		@ E+=X[i]
	eor	r10,r10,r7,ror#2	@ F_00_19(B,C,D)
	add	r3,r3,r10		@ E+=F_00_19(B,C,D)

	ldr	r8,.LK_20_39		@ [+15+16*4]
	cmn	sp,#0			@ [+3], clear carry to denote 20_39
.L_20_39_or_60_79:
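	@ Shared body for rounds 20..39 and 60..79, which use the same
	@ F = B^C^D; the carry flag tells them apart: cmn sp,#0 above clears
	@ carry for 20..39, cmp sp,#0 before the second entry sets it, and
	@ bcs below exits to .L_done after the 60..79 pass.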
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r7,r8,r7,ror#2		@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12		@ 1 cycle stall
	eor	r10,r5,r6		@ F_xx_xx
	mov	r9,r9,ror#31
	add	r7,r7,r3,ror#27		@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	eor	r10,r4,r10,ror#2	@ F_xx_xx
					@ F_xx_xx
	add	r7,r7,r9		@ E+=X[i]
	add	r7,r7,r10		@ E+=F_20_39(B,C,D)
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r6,r8,r6,ror#2		@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12		@ 1 cycle stall
	eor	r10,r4,r5		@ F_xx_xx
	mov	r9,r9,ror#31
	add	r6,r6,r7,ror#27		@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	eor	r10,r3,r10,ror#2	@ F_xx_xx
					@ F_xx_xx
	add	r6,r6,r9		@ E+=X[i]
	add	r6,r6,r10		@ E+=F_20_39(B,C,D)
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r5,r8,r5,ror#2		@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12		@ 1 cycle stall
	eor	r10,r3,r4		@ F_xx_xx
	mov	r9,r9,ror#31
	add	r5,r5,r6,ror#27		@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	eor	r10,r7,r10,ror#2	@ F_xx_xx
					@ F_xx_xx
	add	r5,r5,r9		@ E+=X[i]
	add	r5,r5,r10		@ E+=F_20_39(B,C,D)
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r4,r8,r4,ror#2		@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12		@ 1 cycle stall
	eor	r10,r7,r3		@ F_xx_xx
	mov	r9,r9,ror#31
	add	r4,r4,r5,ror#27		@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	eor	r10,r6,r10,ror#2	@ F_xx_xx
					@ F_xx_xx
	add	r4,r4,r9		@ E+=X[i]
	add	r4,r4,r10		@ E+=F_20_39(B,C,D)
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r3,r8,r3,ror#2		@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12		@ 1 cycle stall
	eor	r10,r6,r7		@ F_xx_xx
	mov	r9,r9,ror#31
	add	r3,r3,r4,ror#27		@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	eor	r10,r5,r10,ror#2	@ F_xx_xx
					@ F_xx_xx
	add	r3,r3,r9		@ E+=X[i]
	add	r3,r3,r10		@ E+=F_20_39(B,C,D)
	teq	r14,sp			@ preserve carry
	bne	.L_20_39_or_60_79	@ [+((12+3)*5+2)*4]
	bcs	.L_done			@ [+((12+3)*5+2)*4], spare 300 bytes

	ldr	r8,.LK_40_59
	sub	sp,sp,#20*4		@ [+2]
.L_40_59:
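	@ Rounds 40..59: Maj(B,C,D) computed as (B&(C^D)) + (C&D); the two
	@ terms can never have the same bit set, so plain adds combine them.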
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r7,r8,r7,ror#2		@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12		@ 1 cycle stall
	eor	r10,r5,r6		@ F_xx_xx
	mov	r9,r9,ror#31
	add	r7,r7,r3,ror#27		@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	and	r10,r4,r10,ror#2	@ F_xx_xx
	and	r11,r5,r6		@ F_xx_xx
	add	r7,r7,r9		@ E+=X[i]
	add	r7,r7,r10		@ E+=F_40_59(B,C,D)
	add	r7,r7,r11,ror#2
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r6,r8,r6,ror#2		@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12		@ 1 cycle stall
	eor	r10,r4,r5		@ F_xx_xx
	mov	r9,r9,ror#31
	add	r6,r6,r7,ror#27		@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	and	r10,r3,r10,ror#2	@ F_xx_xx
	and	r11,r4,r5		@ F_xx_xx
	add	r6,r6,r9		@ E+=X[i]
	add	r6,r6,r10		@ E+=F_40_59(B,C,D)
	add	r6,r6,r11,ror#2
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r5,r8,r5,ror#2		@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12		@ 1 cycle stall
	eor	r10,r3,r4		@ F_xx_xx
	mov	r9,r9,ror#31
	add	r5,r5,r6,ror#27		@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	and	r10,r7,r10,ror#2	@ F_xx_xx
	and	r11,r3,r4		@ F_xx_xx
	add	r5,r5,r9		@ E+=X[i]
	add	r5,r5,r10		@ E+=F_40_59(B,C,D)
	add	r5,r5,r11,ror#2
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r4,r8,r4,ror#2		@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12		@ 1 cycle stall
	eor	r10,r7,r3		@ F_xx_xx
	mov	r9,r9,ror#31
	add	r4,r4,r5,ror#27		@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	and	r10,r6,r10,ror#2	@ F_xx_xx
	and	r11,r7,r3		@ F_xx_xx
	add	r4,r4,r9		@ E+=X[i]
	add	r4,r4,r10		@ E+=F_40_59(B,C,D)
	add	r4,r4,r11,ror#2
	ldr	r9,[r14,#15*4]
	ldr	r10,[r14,#13*4]
	ldr	r11,[r14,#7*4]
	add	r3,r8,r3,ror#2		@ E+=K_xx_xx
	ldr	r12,[r14,#2*4]
	eor	r9,r9,r10
	eor	r11,r11,r12		@ 1 cycle stall
	eor	r10,r6,r7		@ F_xx_xx
	mov	r9,r9,ror#31
	add	r3,r3,r4,ror#27		@ E+=ROR(A,27)
	eor	r9,r9,r11,ror#31
	str	r9,[r14,#-4]!
	and	r10,r5,r10,ror#2	@ F_xx_xx
	and	r11,r6,r7		@ F_xx_xx
	add	r3,r3,r9		@ E+=X[i]
	add	r3,r3,r10		@ E+=F_40_59(B,C,D)
	add	r3,r3,r11,ror#2
	teq	r14,sp
	bne	.L_40_59		@ [+((12+5)*5+2)*4]

	ldr	r8,.LK_60_79
	sub	sp,sp,#20*4
	cmp	sp,#0			@ set carry to denote 60_79
	b	.L_20_39_or_60_79	@ [+4], spare 300 bytes
.L_done:
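	@ The working copies of C, D and E carry a two-bit rotation through
	@ the rounds (see the ror#30 at .Lloop); the ror#2 below folds that
	@ back while adding in the previous chaining values.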
	add	sp,sp,#80*4		@ "deallocate" stack frame
	ldmia	r0,{r8,r9,r10,r11,r12}
	add	r3,r8,r3
	add	r4,r9,r4
	add	r5,r10,r5,ror#2
	add	r6,r11,r6,ror#2
	add	r7,r12,r7,ror#2
	stmia	r0,{r3,r4,r5,r6,r7}
	teq	r1,r2
	bne	.Lloop			@ [+18], total 1307

#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
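	@ 0xe12fff1e is the encoding of "bx lr", emitted as raw data so the
	@ file still assembles for plain ARMv4 (which lacks BX) while giving
	@ Thumb callers a proper interworking return on v4T and later.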
	.word	0xe12fff1e		@ interoperable with Thumb ISA:-)
#endif
.align	2
.LK_00_19:	.word	0x5a827999
.LK_20_39:	.word	0x6ed9eba1
.LK_40_59:	.word	0x8f1bbcdc
.LK_60_79:	.word	0xca62c1d6
.size	sha1_block_data_order,.-sha1_block_data_order
.asciz	"SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
.align	2

#endif
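
For orientation, a minimal C-side sketch of driving this entry point. The prototype is an assumption inferred from the register use above (r0 = five 32-bit state words, r1 = input, r2 = count of 64-byte blocks); BoringSSL declares it internally rather than in a public header, and the routine consumes whole blocks only, so SHA-1 padding is the caller's job.

/* Sketch only: link against the assembly above and run on ARM. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed prototype, inferred from the register usage in the listing. */
extern void sha1_block_data_order(uint32_t *state, const uint8_t *data,
                                  size_t num_blocks);

int main(void) {
  /* SHA-1 initial hash values (FIPS 180-4). */
  uint32_t state[5] = {0x67452301, 0xefcdab89, 0x98badcfe,
                       0x10325476, 0xc3d2e1f0};
  /* One pre-padded block for the empty message: 0x80 marker followed
   * by zeros, with a zero 64-bit bit-length in the final 8 bytes. */
  uint8_t block[64];
  memset(block, 0, sizeof(block));
  block[0] = 0x80;
  sha1_block_data_order(state, block, 1);
  for (int i = 0; i < 5; i++)   /* expect SHA-1("") = da39a3ee...0709 */
    printf("%08x", (unsigned)state[i]);
  printf("\n");
  return 0;
}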
