#if defined(__arm__)
#if defined(__arm__)
#include "arm_arch.h"

.syntax unified

.text
.code 32

#ifdef __APPLE__
#define ldrplb ldrbpl
#define ldrneb ldrbne
#endif

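@ rem_4bit holds the 16 precomputed reduction constants for the
@ nibble-at-a-time (4-bit) GHASH: entry i is the term folded back into
@ the accumulator when nibble i is shifted out of the 128-bit value,
@ stored in a half-word so it can be XOR-ed in with lsl#16.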
.type rem_4bit,%object
.align 5
rem_4bit:
.short 0x0000,0x1C20,0x3840,0x2460
.short 0x7080,0x6CA0,0x48C0,0x54E0
.short 0xE100,0xFD20,0xD940,0xC560
.short 0x9180,0x8DA0,0xA9C0,0xB5E0
.size rem_4bit,.-rem_4bit

.type rem_4bit_get,%function
rem_4bit_get:
    sub r2,r2,#32 @ &rem_4bit
    b .Lrem_4bit_got
    nop
.size rem_4bit_get,.-rem_4bit_get

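@ void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
@                     const u8 *inp, size_t len)
@ (prototype as assumed from the usual gcm128 C callers; per AAPCS
@ r0=Xi, r1=Htable, r2=inp, r3=len).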
.globl gcm_ghash_4bit
.hidden gcm_ghash_4bit
.type gcm_ghash_4bit,%function
gcm_ghash_4bit:
    sub r12,pc,#8
    add r3,r2,r3 @ r3 to point at the end
    stmdb sp!,{r3,r4,r5,r6,r7,r8,r9,r10,r11,lr} @ save r3/end too
    sub r12,r12,#48 @ &rem_4bit

    ldmia r12,{r4,r5,r6,r7,r8,r9,r10,r11} @ copy rem_4bit ...
    stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} @ ... to stack

    ldrb r12,[r2,#15]
    ldrb r14,[r0,#15]
.Louter:
    eor r12,r12,r14
    and r14,r12,#0xf0
    and r12,r12,#0x0f
    mov r3,#14

    add r7,r1,r12,lsl#4
    ldmia r7,{r4,r5,r6,r7} @ load Htbl[nlo]
    add r11,r1,r14
    ldrb r12,[r2,#14]

    and r14,r4,#0xf @ rem
    ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi]
    add r14,r14,r14
    eor r4,r8,r4,lsr#4
    ldrh r8,[sp,r14] @ rem_4bit[rem]
    eor r4,r4,r5,lsl#28
    ldrb r14,[r0,#14]
    eor r5,r9,r5,lsr#4
    eor r5,r5,r6,lsl#28
    eor r6,r10,r6,lsr#4
    eor r6,r6,r7,lsl#28
    eor r7,r11,r7,lsr#4
    eor r12,r12,r14
    and r14,r12,#0xf0
    and r12,r12,#0x0f
    eor r7,r7,r8,lsl#16

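@ Inner loop: consumes one byte of Xi^inp per iteration (r3 counts
@ 14..0), doing two Htable lookups (low and high nibble) and a 4-bit
@ shift-and-reduce of the 128-bit accumulator held in r4-r7.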
.Linner:
    add r11,r1,r12,lsl#4
    and r12,r4,#0xf @ rem
    subs r3,r3,#1
    add r12,r12,r12
    ldmia r11,{r8,r9,r10,r11} @ load Htbl[nlo]
    eor r4,r8,r4,lsr#4
    eor r4,r4,r5,lsl#28
    eor r5,r9,r5,lsr#4
    eor r5,r5,r6,lsl#28
    ldrh r8,[sp,r12] @ rem_4bit[rem]
    eor r6,r10,r6,lsr#4
    ldrbpl r12,[r2,r3]
    eor r6,r6,r7,lsl#28
    eor r7,r11,r7,lsr#4

    add r11,r1,r14
    and r14,r4,#0xf @ rem
    eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
    add r14,r14,r14
    ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi]
    eor r4,r8,r4,lsr#4
    ldrbpl r8,[r0,r3]
    eor r4,r4,r5,lsl#28
    eor r5,r9,r5,lsr#4
    ldrh r9,[sp,r14]
    eor r5,r5,r6,lsl#28
    eor r6,r10,r6,lsr#4
    eor r6,r6,r7,lsl#28
    eorpl r12,r12,r8
    eor r7,r11,r7,lsr#4
    andpl r14,r12,#0xf0
    andpl r12,r12,#0x0f
    eor r7,r7,r9,lsl#16 @ ^= rem_4bit[rem]
    bpl .Linner

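@ Block done: r4-r7 hold the updated Xi. Write it back to [r0] word by
@ word (offsets 12,8,4,0), byte-swapping on little-endian targets.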
    ldr r3,[sp,#32] @ re-load r3/end
    add r2,r2,#16
    mov r14,r4
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
    rev r4,r4
    str r4,[r0,#12]
#elif defined(__ARMEB__)
    str r4,[r0,#12]
#else
    mov r9,r4,lsr#8
    strb r4,[r0,#12+3]
    mov r10,r4,lsr#16
    strb r9,[r0,#12+2]
    mov r11,r4,lsr#24
    strb r10,[r0,#12+1]
    strb r11,[r0,#12]
#endif
    cmp r2,r3
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
    rev r5,r5
    str r5,[r0,#8]
#elif defined(__ARMEB__)
    str r5,[r0,#8]
#else
    mov r9,r5,lsr#8
    strb r5,[r0,#8+3]
    mov r10,r5,lsr#16
    strb r9,[r0,#8+2]
    mov r11,r5,lsr#24
    strb r10,[r0,#8+1]
    strb r11,[r0,#8]
#endif
    ldrbne r12,[r2,#15]
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
    rev r6,r6
    str r6,[r0,#4]
#elif defined(__ARMEB__)
    str r6,[r0,#4]
#else
    mov r9,r6,lsr#8
    strb r6,[r0,#4+3]
    mov r10,r6,lsr#16
    strb r9,[r0,#4+2]
    mov r11,r6,lsr#24
    strb r10,[r0,#4+1]
    strb r11,[r0,#4]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
    rev r7,r7
    str r7,[r0,#0]
#elif defined(__ARMEB__)
    str r7,[r0,#0]
#else
    mov r9,r7,lsr#8
    strb r7,[r0,#0+3]
    mov r10,r7,lsr#16
    strb r9,[r0,#0+2]
    mov r11,r7,lsr#24
    strb r10,[r0,#0+1]
    strb r11,[r0,#0]
#endif

    bne .Louter

    add sp,sp,#36
#if __ARM_ARCH__>=5
    ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
    ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
    tst lr,#1
    moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size gcm_ghash_4bit,.-gcm_ghash_4bit

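@ void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16])
@ (assumed prototype). Same shift-and-reduce algorithm as
@ gcm_ghash_4bit for a single block, with no input XOR-ed in; rem_4bit
@ is addressed through r2 (set up by rem_4bit_get) instead of a stack
@ copy.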
.globl gcm_gmult_4bit
.hidden gcm_gmult_4bit
.type gcm_gmult_4bit,%function
gcm_gmult_4bit:
    stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
    ldrb r12,[r0,#15]
    b rem_4bit_get
.Lrem_4bit_got:
    and r14,r12,#0xf0
    and r12,r12,#0x0f
    mov r3,#14

    add r7,r1,r12,lsl#4
    ldmia r7,{r4,r5,r6,r7} @ load Htbl[nlo]
    ldrb r12,[r0,#14]

    add r11,r1,r14
    and r14,r4,#0xf @ rem
    ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi]
    add r14,r14,r14
    eor r4,r8,r4,lsr#4
    ldrh r8,[r2,r14] @ rem_4bit[rem]
    eor r4,r4,r5,lsl#28
    eor r5,r9,r5,lsr#4
    eor r5,r5,r6,lsl#28
    eor r6,r10,r6,lsr#4
    eor r6,r6,r7,lsl#28
    eor r7,r11,r7,lsr#4
    and r14,r12,#0xf0
    eor r7,r7,r8,lsl#16
    and r12,r12,#0x0f

.Loop:
    add r11,r1,r12,lsl#4
    and r12,r4,#0xf @ rem
    subs r3,r3,#1
    add r12,r12,r12
    ldmia r11,{r8,r9,r10,r11} @ load Htbl[nlo]
    eor r4,r8,r4,lsr#4
    eor r4,r4,r5,lsl#28
    eor r5,r9,r5,lsr#4
    eor r5,r5,r6,lsl#28
    ldrh r8,[r2,r12] @ rem_4bit[rem]
    eor r6,r10,r6,lsr#4
    ldrbpl r12,[r0,r3]
    eor r6,r6,r7,lsl#28
    eor r7,r11,r7,lsr#4

    add r11,r1,r14
    and r14,r4,#0xf @ rem
    eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
    add r14,r14,r14
    ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi]
    eor r4,r8,r4,lsr#4
    eor r4,r4,r5,lsl#28
    eor r5,r9,r5,lsr#4
    ldrh r8,[r2,r14] @ rem_4bit[rem]
    eor r5,r5,r6,lsl#28
    eor r6,r10,r6,lsr#4
    eor r6,r6,r7,lsl#28
    eor r7,r11,r7,lsr#4
    andpl r14,r12,#0xf0
    andpl r12,r12,#0x0f
    eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
    bpl .Loop
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
    rev r4,r4
    str r4,[r0,#12]
#elif defined(__ARMEB__)
    str r4,[r0,#12]
#else
    mov r9,r4,lsr#8
    strb r4,[r0,#12+3]
    mov r10,r4,lsr#16
    strb r9,[r0,#12+2]
    mov r11,r4,lsr#24
    strb r10,[r0,#12+1]
    strb r11,[r0,#12]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
    rev r5,r5
    str r5,[r0,#8]
#elif defined(__ARMEB__)
    str r5,[r0,#8]
#else
    mov r9,r5,lsr#8
    strb r5,[r0,#8+3]
    mov r10,r5,lsr#16
    strb r9,[r0,#8+2]
    mov r11,r5,lsr#24
    strb r10,[r0,#8+1]
    strb r11,[r0,#8]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
    rev r6,r6
    str r6,[r0,#4]
#elif defined(__ARMEB__)
    str r6,[r0,#4]
#else
    mov r9,r6,lsr#8
    strb r6,[r0,#4+3]
    mov r10,r6,lsr#16
    strb r9,[r0,#4+2]
    mov r11,r6,lsr#24
    strb r10,[r0,#4+1]
    strb r11,[r0,#4]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
    rev r7,r7
    str r7,[r0,#0]
#elif defined(__ARMEB__)
    str r7,[r0,#0]
#else
    mov r9,r7,lsr#8
    strb r7,[r0,#0+3]
    mov r10,r7,lsr#16
    strb r9,[r0,#0+2]
    mov r11,r7,lsr#24
    strb r10,[r0,#0+1]
    strb r11,[r0,#0]
#endif

#if __ARM_ARCH__>=5
    ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
    ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
    tst lr,#1
    moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size gcm_gmult_4bit,.-gcm_gmult_4bit
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon

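@ void gcm_init_neon(u128 Htable[16], const u64 H[2])
@ (assumed prototype). Stores a single "twisted" copy of H: H is
@ shifted left by one bit and, if the carried-out top bit was set, the
@ reduction constant 0xc2...01 is folded in, which makes the
@ polynomial reduction in the multiply routines below cheaper.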
.globl gcm_init_neon
.hidden gcm_init_neon
.type gcm_init_neon,%function
.align 4
gcm_init_neon:
    vld1.64 d7,[r1]! @ load H
    vmov.i8 q8,#0xe1
    vld1.64 d6,[r1]
    vshl.i64 d17,#57
    vshr.u64 d16,#63 @ t0=0xc2....01
    vdup.8 q9,d7[7]
    vshr.u64 d26,d6,#63
    vshr.s8 q9,#7 @ broadcast carry bit
    vshl.i64 q3,q3,#1
    vand q8,q8,q9
    vorr d7,d26 @ H<<<=1
    veor q3,q3,q8 @ twisted H
    vstmia r0,{q3}

    bx lr @ bx lr
.size gcm_init_neon,.-gcm_init_neon

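@ void gcm_gmult_neon(u64 Xi[2], const u128 Htable[16])
@ (assumed prototype). Multiplies Xi by the twisted H and reduces,
@ falling into the shared .Lgmult_neon body with r3=16 so the block
@ loop runs exactly once.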
.globl gcm_gmult_neon
.hidden gcm_gmult_neon
.type gcm_gmult_neon,%function
.align 4
gcm_gmult_neon:
    vld1.64 d7,[r0]! @ load Xi
    vld1.64 d6,[r0]!
    vmov.i64 d29,#0x0000ffffffffffff
    vldmia r1,{d26,d27} @ load twisted H
    vmov.i64 d30,#0x00000000ffffffff
#ifdef __ARMEL__
    vrev64.8 q3,q3
#endif
    vmov.i64 d31,#0x000000000000ffff
    veor d28,d26,d27 @ Karatsuba pre-processing
    mov r3,#16
    b .Lgmult_neon
.size gcm_gmult_neon,.-gcm_gmult_neon

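@ void gcm_ghash_neon(u64 Xi[2], const u128 Htable[16],
@                     const u8 *inp, size_t len)
@ (assumed prototype). Hashes len bytes (a multiple of 16) from inp
@ into Xi, one 128-bit block per trip around .Loop_neon.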
.globl gcm_ghash_neon
.hidden gcm_ghash_neon
.type gcm_ghash_neon,%function
.align 4
gcm_ghash_neon:
    vld1.64 d1,[r0]! @ load Xi
    vld1.64 d0,[r0]!
    vmov.i64 d29,#0x0000ffffffffffff
    vldmia r1,{d26,d27} @ load twisted H
    vmov.i64 d30,#0x00000000ffffffff
#ifdef __ARMEL__
    vrev64.8 q0,q0
#endif
    vmov.i64 d31,#0x000000000000ffff
    veor d28,d26,d27 @ Karatsuba pre-processing

.Loop_neon:
    vld1.64 d7,[r2]! @ load inp
    vld1.64 d6,[r2]!
#ifdef __ARMEL__
    vrev64.8 q3,q3
#endif
    veor q3,q0 @ inp^=Xi
.Lgmult_neon:
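    @ 64x64->128 carry-less multiply synthesized from 8x8 vmull.p8
    @ products: each operand half is multiplied against byte-rotated
    @ copies of the other (A1*B, A*B1, ...), the partials are masked
    @ (d29-d31) and shifted into place, and three such multiplies
    @ (low, high, Karatsuba middle) are combined into a 256-bit result.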
    vext.8 d16, d26, d26, #1 @ A1
    vmull.p8 q8, d16, d6 @ F = A1*B
    vext.8 d0, d6, d6, #1 @ B1
    vmull.p8 q0, d26, d0 @ E = A*B1
    vext.8 d18, d26, d26, #2 @ A2
    vmull.p8 q9, d18, d6 @ H = A2*B
    vext.8 d22, d6, d6, #2 @ B2
    vmull.p8 q11, d26, d22 @ G = A*B2
    vext.8 d20, d26, d26, #3 @ A3
    veor q8, q8, q0 @ L = E + F
    vmull.p8 q10, d20, d6 @ J = A3*B
    vext.8 d0, d6, d6, #3 @ B3
    veor q9, q9, q11 @ M = G + H
    vmull.p8 q0, d26, d0 @ I = A*B3
    veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
    vand d17, d17, d29
    vext.8 d22, d6, d6, #4 @ B4
    veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
    vand d19, d19, d30
    vmull.p8 q11, d26, d22 @ K = A*B4
    veor q10, q10, q0 @ N = I + J
    veor d16, d16, d17
    veor d18, d18, d19
    veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
    vand d21, d21, d31
    vext.8 q8, q8, q8, #15
    veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
    vmov.i64 d23, #0
    vext.8 q9, q9, q9, #14
    veor d20, d20, d21
    vmull.p8 q0, d26, d6 @ D = A*B
    vext.8 q11, q11, q11, #12
    vext.8 q10, q10, q10, #13
    veor q8, q8, q9
    veor q10, q10, q11
    veor q0, q0, q8
    veor q0, q0, q10
    veor d6,d6,d7 @ Karatsuba pre-processing
    vext.8 d16, d28, d28, #1 @ A1
    vmull.p8 q8, d16, d6 @ F = A1*B
    vext.8 d2, d6, d6, #1 @ B1
    vmull.p8 q1, d28, d2 @ E = A*B1
    vext.8 d18, d28, d28, #2 @ A2
    vmull.p8 q9, d18, d6 @ H = A2*B
    vext.8 d22, d6, d6, #2 @ B2
    vmull.p8 q11, d28, d22 @ G = A*B2
    vext.8 d20, d28, d28, #3 @ A3
    veor q8, q8, q1 @ L = E + F
    vmull.p8 q10, d20, d6 @ J = A3*B
    vext.8 d2, d6, d6, #3 @ B3
    veor q9, q9, q11 @ M = G + H
    vmull.p8 q1, d28, d2 @ I = A*B3
    veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
    vand d17, d17, d29
    vext.8 d22, d6, d6, #4 @ B4
    veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
    vand d19, d19, d30
    vmull.p8 q11, d28, d22 @ K = A*B4
    veor q10, q10, q1 @ N = I + J
    veor d16, d16, d17
    veor d18, d18, d19
    veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
    vand d21, d21, d31
    vext.8 q8, q8, q8, #15
    veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
    vmov.i64 d23, #0
    vext.8 q9, q9, q9, #14
    veor d20, d20, d21
    vmull.p8 q1, d28, d6 @ D = A*B
    vext.8 q11, q11, q11, #12
    vext.8 q10, q10, q10, #13
    veor q8, q8, q9
    veor q10, q10, q11
    veor q1, q1, q8
    veor q1, q1, q10
    vext.8 d16, d27, d27, #1 @ A1
    vmull.p8 q8, d16, d7 @ F = A1*B
    vext.8 d4, d7, d7, #1 @ B1
    vmull.p8 q2, d27, d4 @ E = A*B1
    vext.8 d18, d27, d27, #2 @ A2
    vmull.p8 q9, d18, d7 @ H = A2*B
    vext.8 d22, d7, d7, #2 @ B2
    vmull.p8 q11, d27, d22 @ G = A*B2
    vext.8 d20, d27, d27, #3 @ A3
    veor q8, q8, q2 @ L = E + F
    vmull.p8 q10, d20, d7 @ J = A3*B
    vext.8 d4, d7, d7, #3 @ B3
    veor q9, q9, q11 @ M = G + H
    vmull.p8 q2, d27, d4 @ I = A*B3
    veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
    vand d17, d17, d29
    vext.8 d22, d7, d7, #4 @ B4
    veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
    vand d19, d19, d30
    vmull.p8 q11, d27, d22 @ K = A*B4
    veor q10, q10, q2 @ N = I + J
    veor d16, d16, d17
    veor d18, d18, d19
    veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
    vand d21, d21, d31
    vext.8 q8, q8, q8, #15
    veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
    vmov.i64 d23, #0
    vext.8 q9, q9, q9, #14
    veor d20, d20, d21
    vmull.p8 q2, d27, d7 @ D = A*B
    vext.8 q11, q11, q11, #12
    vext.8 q10, q10, q10, #13
    veor q8, q8, q9
    veor q10, q10, q11
    veor q2, q2, q8
    veor q2, q2, q10
    veor q1,q1,q0 @ Karatsuba post-processing
    veor q1,q1,q2
    veor d1,d1,d2
    veor d4,d4,d3 @ Xh|Xl - 256-bit result

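    @ Reduce the 256-bit product modulo the bit-reflected GHASH
    @ polynomial x^128 + x^7 + x^2 + x + 1: the 1st phase folds the
    @ low half up with shifts by 57/62/63, the 2nd phase finishes the
    @ fold with the >>1, >>6, >>1 shift/XOR schedule below.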
    @ equivalent of reduction_avx from ghash-x86_64.pl
    vshl.i64 q9,q0,#57 @ 1st phase
    vshl.i64 q10,q0,#62
    veor q10,q10,q9 @
    vshl.i64 q9,q0,#63
    veor q10, q10, q9 @
    veor d1,d1,d20 @
    veor d4,d4,d21

    vshr.u64 q10,q0,#1 @ 2nd phase
    veor q2,q2,q0
    veor q0,q0,q10 @
    vshr.u64 q10,q10,#6
    vshr.u64 q0,q0,#1 @
    veor q0,q0,q2 @
    veor q0,q0,q10 @

    subs r3,#16
    bne .Loop_neon

#ifdef __ARMEL__
    vrev64.8 q0,q0
#endif
    sub r0,#16
    vst1.64 d1,[r0]! @ write out Xi
    vst1.64 d0,[r0]

    bx lr @ bx lr
.size gcm_ghash_neon,.-gcm_ghash_neon
#endif
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2

#endif
#endif