Chromium Code Reviews

Side by Side Diff: third_party/boringssl/linux-arm/crypto/modes/ghash-armv4.S

Issue 377783004: Add BoringSSL GYP files. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Final Python fix. Created 6 years, 5 months ago
(Old file is empty; this patch adds the file.)
1 #if defined(__arm__)
2 #include "arm_arch.h"
3
4 .text
5 .code 32
6
7 .type rem_4bit,%object
8 .align 5
9 rem_4bit:
10 .short 0x0000,0x1C20,0x3840,0x2460
11 .short 0x7080,0x6CA0,0x48C0,0x54E0
12 .short 0xE100,0xFD20,0xD940,0xC560
13 .short 0x9180,0x8DA0,0xA9C0,0xB5E0
14 .size rem_4bit,.-rem_4bit
15
16 .type rem_4bit_get,%function
17 rem_4bit_get:
18 sub r2,pc,#8
19 sub r2,r2,#32 @ &rem_4bit
20 b .Lrem_4bit_got
21 nop
22 .size rem_4bit_get,.-rem_4bit_get
23
24 .global gcm_ghash_4bit
25 .type gcm_ghash_4bit,%function
26 gcm_ghash_4bit:
27 sub r12,pc,#8
28 add r3,r2,r3 @ r3 to point at the end
29 stmdb sp!,{r3-r11,lr} @ save r3/end too
30 sub r12,r12,#48 @ &rem_4bit
31
32 ldmia r12,{r4-r11} @ copy rem_4bit ...
33 stmdb sp!,{r4-r11} @ ... to stack
34
35 ldrb r12,[r2,#15]
36 ldrb r14,[r0,#15]
37 .Louter:
38 eor r12,r12,r14
39 and r14,r12,#0xf0
40 and r12,r12,#0x0f
41 mov r3,#14
42
43 add r7,r1,r12,lsl#4
44 ldmia r7,{r4-r7} @ load Htbl[nlo]
45 add r11,r1,r14
46 ldrb r12,[r2,#14]
47
48 and r14,r4,#0xf @ rem
49 ldmia r11,{r8-r11} @ load Htbl[nhi]
50 add r14,r14,r14
51 eor r4,r8,r4,lsr#4
52 ldrh r8,[sp,r14] @ rem_4bit[rem]
53 eor r4,r4,r5,lsl#28
54 ldrb r14,[r0,#14]
55 eor r5,r9,r5,lsr#4
56 eor r5,r5,r6,lsl#28
57 eor r6,r10,r6,lsr#4
58 eor r6,r6,r7,lsl#28
59 eor r7,r11,r7,lsr#4
60 eor r12,r12,r14
61 and r14,r12,#0xf0
62 and r12,r12,#0x0f
63 eor r7,r7,r8,lsl#16
64
65 .Linner:
66 add r11,r1,r12,lsl#4
67 and r12,r4,#0xf @ rem
68 subs r3,r3,#1
69 add r12,r12,r12
70 ldmia r11,{r8-r11} @ load Htbl[nlo]
71 eor r4,r8,r4,lsr#4
72 eor r4,r4,r5,lsl#28
73 eor r5,r9,r5,lsr#4
74 eor r5,r5,r6,lsl#28
75 ldrh r8,[sp,r12] @ rem_4bit[rem]
76 eor r6,r10,r6,lsr#4
77 ldrplb r12,[r2,r3]
78 eor r6,r6,r7,lsl#28
79 eor r7,r11,r7,lsr#4
80
81 add r11,r1,r14
82 and r14,r4,#0xf @ rem
83 eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
84 add r14,r14,r14
85 ldmia r11,{r8-r11} @ load Htbl[nhi]
86 eor r4,r8,r4,lsr#4
87 ldrplb r8,[r0,r3]
88 eor r4,r4,r5,lsl#28
89 eor r5,r9,r5,lsr#4
90 ldrh r9,[sp,r14]
91 eor r5,r5,r6,lsl#28
92 eor r6,r10,r6,lsr#4
93 eor r6,r6,r7,lsl#28
94 eorpl r12,r12,r8
95 eor r7,r11,r7,lsr#4
96 andpl r14,r12,#0xf0
97 andpl r12,r12,#0x0f
98 eor r7,r7,r9,lsl#16 @ ^= rem_4bit[rem]
99 bpl .Linner
100
101 ldr r3,[sp,#32] @ re-load r3/end
102 add r2,r2,#16
103 mov r14,r4
104 #if __ARM_ARCH__>=7 && defined(__ARMEL__)
105 rev r4,r4
106 str r4,[r0,#12]
107 #elif defined(__ARMEB__)
108 str r4,[r0,#12]
109 #else
110 mov r9,r4,lsr#8
111 strb r4,[r0,#12+3]
112 mov r10,r4,lsr#16
113 strb r9,[r0,#12+2]
114 mov r11,r4,lsr#24
115 strb r10,[r0,#12+1]
116 strb r11,[r0,#12]
117 #endif
118 cmp r2,r3
119 #if __ARM_ARCH__>=7 && defined(__ARMEL__)
120 rev r5,r5
121 str r5,[r0,#8]
122 #elif defined(__ARMEB__)
123 str r5,[r0,#8]
124 #else
125 mov r9,r5,lsr#8
126 strb r5,[r0,#8+3]
127 mov r10,r5,lsr#16
128 strb r9,[r0,#8+2]
129 mov r11,r5,lsr#24
130 strb r10,[r0,#8+1]
131 strb r11,[r0,#8]
132 #endif
133 ldrneb r12,[r2,#15]
134 #if __ARM_ARCH__>=7 && defined(__ARMEL__)
135 rev r6,r6
136 str r6,[r0,#4]
137 #elif defined(__ARMEB__)
138 str r6,[r0,#4]
139 #else
140 mov r9,r6,lsr#8
141 strb r6,[r0,#4+3]
142 mov r10,r6,lsr#16
143 strb r9,[r0,#4+2]
144 mov r11,r6,lsr#24
145 strb r10,[r0,#4+1]
146 strb r11,[r0,#4]
147 #endif
148
149 #if __ARM_ARCH__>=7 && defined(__ARMEL__)
150 rev r7,r7
151 str r7,[r0,#0]
152 #elif defined(__ARMEB__)
153 str r7,[r0,#0]
154 #else
155 mov r9,r7,lsr#8
156 strb r7,[r0,#0+3]
157 mov r10,r7,lsr#16
158 strb r9,[r0,#0+2]
159 mov r11,r7,lsr#24
160 strb r10,[r0,#0+1]
161 strb r11,[r0,#0]
162 #endif
163
164 bne .Louter
165
166 add sp,sp,#36
167 #if __ARM_ARCH__>=5
168 ldmia sp!,{r4-r11,pc}
169 #else
170 ldmia sp!,{r4-r11,lr}
171 tst lr,#1
172 moveq pc,lr @ be binary compatible with V4, yet
173 .word 0xe12fff1e @ interoperable with Thumb ISA:-)
174 #endif
175 .size gcm_ghash_4bit,.-gcm_ghash_4bit
176
177 .global gcm_gmult_4bit
178 .type gcm_gmult_4bit,%function
179 gcm_gmult_4bit:
180 stmdb sp!,{r4-r11,lr}
181 ldrb r12,[r0,#15]
182 b rem_4bit_get
183 .Lrem_4bit_got:
184 and r14,r12,#0xf0
185 and r12,r12,#0x0f
186 mov r3,#14
187
188 add r7,r1,r12,lsl#4
189 ldmia r7,{r4-r7} @ load Htbl[nlo]
190 ldrb r12,[r0,#14]
191
192 add r11,r1,r14
193 and r14,r4,#0xf @ rem
194 ldmia r11,{r8-r11} @ load Htbl[nhi]
195 add r14,r14,r14
196 eor r4,r8,r4,lsr#4
197 ldrh r8,[r2,r14] @ rem_4bit[rem]
198 eor r4,r4,r5,lsl#28
199 eor r5,r9,r5,lsr#4
200 eor r5,r5,r6,lsl#28
201 eor r6,r10,r6,lsr#4
202 eor r6,r6,r7,lsl#28
203 eor r7,r11,r7,lsr#4
204 and r14,r12,#0xf0
205 eor r7,r7,r8,lsl#16
206 and r12,r12,#0x0f
207
208 .Loop:
209 add r11,r1,r12,lsl#4
210 and r12,r4,#0xf @ rem
211 subs r3,r3,#1
212 add r12,r12,r12
213 ldmia r11,{r8-r11} @ load Htbl[nlo]
214 eor r4,r8,r4,lsr#4
215 eor r4,r4,r5,lsl#28
216 eor r5,r9,r5,lsr#4
217 eor r5,r5,r6,lsl#28
218 ldrh r8,[r2,r12] @ rem_4bit[rem]
219 eor r6,r10,r6,lsr#4
220 ldrplb r12,[r0,r3]
221 eor r6,r6,r7,lsl#28
222 eor r7,r11,r7,lsr#4
223
224 add r11,r1,r14
225 and r14,r4,#0xf @ rem
226 eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
227 add r14,r14,r14
228 ldmia r11,{r8-r11} @ load Htbl[nhi]
229 eor r4,r8,r4,lsr#4
230 eor r4,r4,r5,lsl#28
231 eor r5,r9,r5,lsr#4
232 ldrh r8,[r2,r14] @ rem_4bit[rem]
233 eor r5,r5,r6,lsl#28
234 eor r6,r10,r6,lsr#4
235 eor r6,r6,r7,lsl#28
236 eor r7,r11,r7,lsr#4
237 andpl r14,r12,#0xf0
238 andpl r12,r12,#0x0f
239 eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
240 bpl .Loop
241 #if __ARM_ARCH__>=7 && defined(__ARMEL__)
242 rev r4,r4
243 str r4,[r0,#12]
244 #elif defined(__ARMEB__)
245 str r4,[r0,#12]
246 #else
247 mov r9,r4,lsr#8
248 strb r4,[r0,#12+3]
249 mov r10,r4,lsr#16
250 strb r9,[r0,#12+2]
251 mov r11,r4,lsr#24
252 strb r10,[r0,#12+1]
253 strb r11,[r0,#12]
254 #endif
255
256 #if __ARM_ARCH__>=7 && defined(__ARMEL__)
257 rev r5,r5
258 str r5,[r0,#8]
259 #elif defined(__ARMEB__)
260 str r5,[r0,#8]
261 #else
262 mov r9,r5,lsr#8
263 strb r5,[r0,#8+3]
264 mov r10,r5,lsr#16
265 strb r9,[r0,#8+2]
266 mov r11,r5,lsr#24
267 strb r10,[r0,#8+1]
268 strb r11,[r0,#8]
269 #endif
270
271 #if __ARM_ARCH__>=7 && defined(__ARMEL__)
272 rev r6,r6
273 str r6,[r0,#4]
274 #elif defined(__ARMEB__)
275 str r6,[r0,#4]
276 #else
277 mov r9,r6,lsr#8
278 strb r6,[r0,#4+3]
279 mov r10,r6,lsr#16
280 strb r9,[r0,#4+2]
281 mov r11,r6,lsr#24
282 strb r10,[r0,#4+1]
283 strb r11,[r0,#4]
284 #endif
285
286 #if __ARM_ARCH__>=7 && defined(__ARMEL__)
287 rev r7,r7
288 str r7,[r0,#0]
289 #elif defined(__ARMEB__)
290 str r7,[r0,#0]
291 #else
292 mov r9,r7,lsr#8
293 strb r7,[r0,#0+3]
294 mov r10,r7,lsr#16
295 strb r9,[r0,#0+2]
296 mov r11,r7,lsr#24
297 strb r10,[r0,#0+1]
298 strb r11,[r0,#0]
299 #endif
300
301 #if __ARM_ARCH__>=5
302 ldmia sp!,{r4-r11,pc}
303 #else
304 ldmia sp!,{r4-r11,lr}
305 tst lr,#1
306 moveq pc,lr @ be binary compatible with V4, yet
307 .word 0xe12fff1e @ interoperable with Thumb ISA:-)
308 #endif
309 .size gcm_gmult_4bit,.-gcm_gmult_4bit
310 #if __ARM_ARCH__>=7
311 .fpu neon
312
313 .global gcm_init_neon
314 .type gcm_init_neon,%function
315 .align 4
316 gcm_init_neon:
317 vld1.64 d7,[r1,:64]! @ load H
318 vmov.i8 q8,#0xe1
319 vld1.64 d6,[r1,:64]
320 vshl.i64 d17,#57
321 vshr.u64 d16,#63 @ t0=0xc2....01
322 vdup.8 q9,d7[7]
323 vshr.u64 d26,d6,#63
324 vshr.s8 q9,#7 @ broadcast carry bit
325 vshl.i64 q3,q3,#1
326 vand q8,q8,q9
327 vorr d7,d26 @ H<<<=1
328 veor q3,q3,q8 @ twisted H
329 vstmia r0,{q3}
330
331 .word 0xe12fff1e
332 .size gcm_init_neon,.-gcm_init_neon
333
334 .global gcm_gmult_neon
335 .type gcm_gmult_neon,%function
336 .align 4
337 gcm_gmult_neon:
338 vld1.64 d7,[r0,:64]! @ load Xi
339 vld1.64 d6,[r0,:64]!
340 vmov.i64 d29,#0x0000ffffffffffff
341 vldmia r1,{d26-d27} @ load twisted H
342 vmov.i64 d30,#0x00000000ffffffff
343 #ifdef __ARMEL__
344 vrev64.8 q3,q3
345 #endif
346 vmov.i64 d31,#0x000000000000ffff
347 veor d28,d26,d27 @ Karatsuba pre-processing
348 mov r3,#16
349 b .Lgmult_neon
350 .size gcm_gmult_neon,.-gcm_gmult_neon
351
352 .global gcm_ghash_neon
353 .type gcm_ghash_neon,%function
354 .align 4
355 gcm_ghash_neon:
356 vld1.64 d1,[r0,:64]! @ load Xi
357 vld1.64 d0,[r0,:64]!
358 vmov.i64 d29,#0x0000ffffffffffff
359 vldmia r1,{d26-d27} @ load twisted H
360 vmov.i64 d30,#0x00000000ffffffff
361 #ifdef __ARMEL__
362 vrev64.8 q0,q0
363 #endif
364 vmov.i64 d31,#0x000000000000ffff
365 veor d28,d26,d27 @ Karatsuba pre-processing
366
367 .Loop_neon:
368 vld1.64 d7,[r2]! @ load inp
369 vld1.64 d6,[r2]!
370 #ifdef __ARMEL__
371 vrev64.8 q3,q3
372 #endif
373 veor q3,q0 @ inp^=Xi
374 .Lgmult_neon:
375 vext.8 d16, d26, d26, #1 @ A1
376 vmull.p8 q8, d16, d6 @ F = A1*B
377 vext.8 d0, d6, d6, #1 @ B1
378 vmull.p8 q0, d26, d0 @ E = A*B1
379 vext.8 d18, d26, d26, #2 @ A2
380 vmull.p8 q9, d18, d6 @ H = A2*B
381 vext.8 d22, d6, d6, #2 @ B2
382 vmull.p8 q11, d26, d22 @ G = A*B2
383 vext.8 d20, d26, d26, #3 @ A3
384 veor q8, q8, q0 @ L = E + F
385 vmull.p8 q10, d20, d6 @ J = A3*B
386 vext.8 d0, d6, d6, #3 @ B3
387 veor q9, q9, q11 @ M = G + H
388 vmull.p8 q0, d26, d0 @ I = A*B3
389 veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
390 vand d17, d17, d29
391 vext.8 d22, d6, d6, #4 @ B4
392 veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
393 vand d19, d19, d30
394 vmull.p8 q11, d26, d22 @ K = A*B4
395 veor q10, q10, q0 @ N = I + J
396 veor d16, d16, d17
397 veor d18, d18, d19
398 veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
399 vand d21, d21, d31
400 vext.8 q8, q8, q8, #15
401 veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
402 vmov.i64 d23, #0
403 vext.8 q9, q9, q9, #14
404 veor d20, d20, d21
405 vmull.p8 q0, d26, d6 @ D = A*B
406 vext.8 q11, q11, q11, #12
407 vext.8 q10, q10, q10, #13
408 veor q8, q8, q9
409 veor q10, q10, q11
410 veor q0, q0, q8
411 veor q0, q0, q10
412 veor d6,d6,d7 @ Karatsuba pre-processing
413 vext.8 d16, d28, d28, #1 @ A1
414 vmull.p8 q8, d16, d6 @ F = A1*B
415 vext.8 d2, d6, d6, #1 @ B1
416 vmull.p8 q1, d28, d2 @ E = A*B1
417 vext.8 d18, d28, d28, #2 @ A2
418 vmull.p8 q9, d18, d6 @ H = A2*B
419 vext.8 d22, d6, d6, #2 @ B2
420 vmull.p8 q11, d28, d22 @ G = A*B2
421 vext.8 d20, d28, d28, #3 @ A3
422 veor q8, q8, q1 @ L = E + F
423 vmull.p8 q10, d20, d6 @ J = A3*B
424 vext.8 d2, d6, d6, #3 @ B3
425 veor q9, q9, q11 @ M = G + H
426 vmull.p8 q1, d28, d2 @ I = A*B3
427 veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
428 vand d17, d17, d29
429 vext.8 d22, d6, d6, #4 @ B4
430 veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
431 vand d19, d19, d30
432 vmull.p8 q11, d28, d22 @ K = A*B4
433 veor q10, q10, q1 @ N = I + J
434 veor d16, d16, d17
435 veor d18, d18, d19
436 veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
437 vand d21, d21, d31
438 vext.8 q8, q8, q8, #15
439 veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
440 vmov.i64 d23, #0
441 vext.8 q9, q9, q9, #14
442 veor d20, d20, d21
443 vmull.p8 q1, d28, d6 @ D = A*B
444 vext.8 q11, q11, q11, #12
445 vext.8 q10, q10, q10, #13
446 veor q8, q8, q9
447 veor q10, q10, q11
448 veor q1, q1, q8
449 veor q1, q1, q10
450 vext.8 d16, d27, d27, #1 @ A1
451 vmull.p8 q8, d16, d7 @ F = A1*B
452 vext.8 d4, d7, d7, #1 @ B1
453 vmull.p8 q2, d27, d4 @ E = A*B1
454 vext.8 d18, d27, d27, #2 @ A2
455 vmull.p8 q9, d18, d7 @ H = A2*B
456 vext.8 d22, d7, d7, #2 @ B2
457 vmull.p8 q11, d27, d22 @ G = A*B2
458 vext.8 d20, d27, d27, #3 @ A3
459 veor q8, q8, q2 @ L = E + F
460 vmull.p8 q10, d20, d7 @ J = A3*B
461 vext.8 d4, d7, d7, #3 @ B3
462 veor q9, q9, q11 @ M = G + H
463 vmull.p8 q2, d27, d4 @ I = A*B3
464 veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
465 vand d17, d17, d29
466 vext.8 d22, d7, d7, #4 @ B4
467 veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
468 vand d19, d19, d30
469 vmull.p8 q11, d27, d22 @ K = A*B4
470 veor q10, q10, q2 @ N = I + J
471 veor d16, d16, d17
472 veor d18, d18, d19
473 veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
474 vand d21, d21, d31
475 vext.8 q8, q8, q8, #15
476 veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
477 vmov.i64 d23, #0
478 vext.8 q9, q9, q9, #14
479 veor d20, d20, d21
480 vmull.p8 q2, d27, d7 @ D = A*B
481 vext.8 q11, q11, q11, #12
482 vext.8 q10, q10, q10, #13
483 veor q8, q8, q9
484 veor q10, q10, q11
485 veor q2, q2, q8
486 veor q2, q2, q10
487 veor q1,q1,q0 @ Karatsuba post-processing
488 veor q1,q1,q2
489 veor d1,d1,d2
490 veor d4,d4,d3 @ Xh|Xl - 256-bit result
491
492 @ equivalent of reduction_avx from ghash-x86_64.pl
493 vshl.i64 q9,q0,#57 @ 1st phase
494 vshl.i64 q10,q0,#62
495 veor q10,q10,q9 @
496 vshl.i64 q9,q0,#63
497 veor q10, q10, q9 @
498 veor d1,d1,d20 @
499 veor d4,d4,d21
500
501 vshr.u64 q10,q0,#1 @ 2nd phase
502 veor q2,q2,q0
503 veor q0,q0,q10 @
504 vshr.u64 q10,q10,#6
505 vshr.u64 q0,q0,#1 @
506 veor q0,q0,q2 @
507 veor q0,q0,q10 @
508
509 subs r3,#16
510 bne .Loop_neon
511
512 #ifdef __ARMEL__
513 vrev64.8 q0,q0
514 #endif
515 sub r0,#16
516 vst1.64 d1,[r0,:64]! @ write out Xi
517 vst1.64 d0,[r0,:64]
518
519 .word 0xe12fff1e
520 .size gcm_ghash_neon,.-gcm_ghash_neon
521 #endif
522 .asciz "GHASH for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
523 .align 2
524
525 #endif
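
For reference, the operation implemented above is GHASH: each 16-byte block is XORed into Xi, and Xi is then multiplied by the hash key H in GF(2^128) modulo x^128 + x^7 + x^2 + x + 1. Below is a minimal bit-serial C sketch of that multiply. The names ghash_gmult_ref/ghash_ref are hypothetical, and the sketch takes the raw key H rather than the precomputed Htable ("twisted H") that gcm_gmult_4bit and the NEON routines consume; it shows the math, not the table-driven or Karatsuba strategies used in the assembly.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Bit-serial multiply of Xi by H in GF(2^128), GCM bit order
     * (NIST SP 800-38D): the MSB of Xi[0] is bit 0, and multiplying
     * by x is a right shift with conditional reduction by 0xE1 in
     * the top byte. */
    static void ghash_gmult_ref(uint8_t Xi[16], const uint8_t H[16]) {
      uint8_t Z[16] = {0};
      uint8_t V[16];
      memcpy(V, H, 16);
      for (int i = 0; i < 128; i++) {
        /* Accumulate V into Z wherever Xi has a 1 bit (MSB first). */
        if ((Xi[i / 8] >> (7 - (i % 8))) & 1) {
          for (int j = 0; j < 16; j++) {
            Z[j] ^= V[j];
          }
        }
        /* V *= x: shift right one bit; reduce if a bit falls off. */
        int carry = V[15] & 1;
        for (int j = 15; j > 0; j--) {
          V[j] = (uint8_t)((V[j] >> 1) | (V[j - 1] << 7));
        }
        V[0] >>= 1;
        if (carry) {
          V[0] ^= 0xE1; /* x^128 = x^7 + x^2 + x + 1 (reflected) */
        }
      }
      memcpy(Xi, Z, 16);
    }

    /* Xi = (Xi ^ block) * H for each 16-byte block of inp, mirroring
     * the contract of gcm_ghash_4bit/gcm_ghash_neon (len is assumed
     * to be a multiple of 16 here). */
    static void ghash_ref(uint8_t Xi[16], const uint8_t H[16],
                          const uint8_t *inp, size_t len) {
      while (len >= 16) {
        for (int j = 0; j < 16; j++) {
          Xi[j] ^= inp[j];
        }
        ghash_gmult_ref(Xi, H);
        inp += 16;
        len -= 16;
      }
    }

The 4-bit assembly path trades this 128-iteration loop for 32 nibble-indexed lookups per block into Htbl, using the rem_4bit constants at the top of the file (the same table as OpenSSL's gcm128.c) to fold the four bits shifted out at each step back into the result; the NEON path instead builds the 128x128-bit carryless product from 64x64 vmull.p8 pieces combined Karatsuba-style, then reduces in two phases.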
