OLD | NEW |
(Empty) | |
| 1 #include "arm_arch.h" |
| 2 |
| 3 .text |
| 4 .code 32 |
| 5 |
| 6 #if __ARM_ARCH__>=7 |
| 7 .align 5 |
| 8 .LOPENSSL_armcap: |
| 9 .word OPENSSL_armcap_P-bn_mul_mont |
| 10 #endif |
| 11 |
| 12 .global bn_mul_mont |
| 13 .type bn_mul_mont,%function |
| 14 |
| 15 .align 5 |
| 16 bn_mul_mont: |
| 17 ldr ip,[sp,#4] @ load num |
| 18 stmdb sp!,{r0,r2} @ sp points at argument block |
| 19 #if __ARM_ARCH__>=7 |
| 20 tst ip,#7 |
| 21 bne .Lialu |
| 22 adr r0,bn_mul_mont |
| 23 ldr r2,.LOPENSSL_armcap |
| 24 ldr r0,[r0,r2] |
| 25 tst r0,#1 @ NEON available? |
| 26 ldmia sp, {r0,r2} |
| 27 beq .Lialu |
| 28 add sp,sp,#8 |
| 29 b bn_mul8x_mont_neon |
| 30 .align 4 |
| 31 .Lialu: |
| 32 #endif |
| 33 cmp ip,#2 |
| 34 mov r0,ip @ load num |
| 35 movlt r0,#0 |
| 36 addlt sp,sp,#2*4 |
| 37 blt .Labrt |
| 38 |
| 39 stmdb sp!,{r4-r12,lr} @ save 10 registers |
| 40 |
| 41 mov r0,r0,lsl#2 @ rescale r0 for byte count |
| 42 sub sp,sp,r0 @ alloca(4*num) |
| 43 sub sp,sp,#4 @ +extra dword |
| 44 sub r0,r0,#4 @ "num=num-1" |
| 45 add r4,r2,r0 @ &bp[num-1] |
| 46 |
| 47 add r0,sp,r0 @ r0 to point at &tp[num-1] |
| 48 ldr r8,[r0,#14*4] @ &n0 |
| 49 ldr r2,[r2] @ bp[0] |
| 50 ldr r5,[r1],#4 @ ap[0],ap++ |
| 51 ldr r6,[r3],#4 @ np[0],np++ |
| 52 ldr r8,[r8] @ *n0 |
| 53 str r4,[r0,#15*4] @ save &bp[num] |
| 54 |
| 55 umull r10,r11,r5,r2 @ ap[0]*bp[0] |
| 56 str r8,[r0,#14*4] @ save n0 value |
| 57 mul r8,r10,r8 @ "tp[0]"*n0 |
| 58 mov r12,#0 |
| 59 umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]" |
| 60 mov r4,sp |
| 61 |
| 62 .L1st: |
| 63 ldr r5,[r1],#4 @ ap[j],ap++ |
| 64 mov r10,r11 |
| 65 ldr r6,[r3],#4 @ np[j],np++ |
| 66 mov r11,#0 |
| 67 umlal r10,r11,r5,r2 @ ap[j]*bp[0] |
| 68 mov r14,#0 |
| 69 umlal r12,r14,r6,r8 @ np[j]*n0 |
| 70 adds r12,r12,r10 |
| 71 str r12,[r4],#4 @ tp[j-1]=,tp++ |
| 72 adc r12,r14,#0 |
| 73 cmp r4,r0 |
| 74 bne .L1st |
| 75 |
| 76 adds r12,r12,r11 |
| 77 ldr r4,[r0,#13*4] @ restore bp |
| 78 mov r14,#0 |
| 79 ldr r8,[r0,#14*4] @ restore n0 |
| 80 adc r14,r14,#0 |
| 81 str r12,[r0] @ tp[num-1]= |
| 82 str r14,[r0,#4] @ tp[num]= |
| 83 |
| 84 .Louter: |
| 85 sub r7,r0,sp @ "original" r0-1 value |
| 86 sub r1,r1,r7 @ "rewind" ap to &ap[1] |
| 87 ldr r2,[r4,#4]! @ *(++bp) |
| 88 sub r3,r3,r7 @ "rewind" np to &np[1] |
| 89 ldr r5,[r1,#-4] @ ap[0] |
| 90 ldr r10,[sp] @ tp[0] |
| 91 ldr r6,[r3,#-4] @ np[0] |
| 92 ldr r7,[sp,#4] @ tp[1] |
| 93 |
| 94 mov r11,#0 |
| 95 umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0] |
| 96 str r4,[r0,#13*4] @ save bp |
| 97 mul r8,r10,r8 |
| 98 mov r12,#0 |
| 99 umlal r10,r12,r6,r8 @ np[0]*n0+"tp[0]" |
| 100 mov r4,sp |
| 101 |
| 102 .Linner: |
| 103 ldr r5,[r1],#4 @ ap[j],ap++ |
| 104 adds r10,r11,r7 @ +=tp[j] |
| 105 ldr r6,[r3],#4 @ np[j],np++ |
| 106 mov r11,#0 |
| 107 umlal r10,r11,r5,r2 @ ap[j]*bp[i] |
| 108 mov r14,#0 |
| 109 umlal r12,r14,r6,r8 @ np[j]*n0 |
| 110 adc r11,r11,#0 |
| 111 ldr r7,[r4,#8] @ tp[j+1] |
| 112 adds r12,r12,r10 |
| 113 str r12,[r4],#4 @ tp[j-1]=,tp++ |
| 114 adc r12,r14,#0 |
| 115 cmp r4,r0 |
| 116 bne .Linner |
| 117 |
| 118 adds r12,r12,r11 |
| 119 mov r14,#0 |
| 120 ldr r4,[r0,#13*4] @ restore bp |
| 121 adc r14,r14,#0 |
| 122 ldr r8,[r0,#14*4] @ restore n0 |
| 123 adds r12,r12,r7 |
| 124 ldr r7,[r0,#15*4] @ restore &bp[num] |
| 125 adc r14,r14,#0 |
| 126 str r12,[r0] @ tp[num-1]= |
| 127 str r14,[r0,#4] @ tp[num]= |
| 128 |
| 129 cmp r4,r7 |
| 130 bne .Louter |
| 131 |
| 132 ldr r2,[r0,#12*4] @ pull rp |
| 133 add r0,r0,#4 @ r0 to point at &tp[num] |
| 134 sub r5,r0,sp @ "original" num value |
| 135 mov r4,sp @ "rewind" r4 |
| 136 mov r1,r4 @ "borrow" r1 |
| 137 sub r3,r3,r5 @ "rewind" r3 to &np[0] |
| 138 |
| 139 subs r7,r7,r7 @ "clear" carry flag |
| 140 .Lsub: ldr r7,[r4],#4 |
| 141 ldr r6,[r3],#4 |
| 142 sbcs r7,r7,r6 @ tp[j]-np[j] |
| 143 str r7,[r2],#4 @ rp[j]= |
| 144 teq r4,r0 @ preserve carry |
| 145 bne .Lsub |
| 146 sbcs r14,r14,#0 @ upmost carry |
| 147 mov r4,sp @ "rewind" r4 |
| 148 sub r2,r2,r5 @ "rewind" r2 |
| 149 |
| 150 and r1,r4,r14 |
| 151 bic r3,r2,r14 |
| 152 orr r1,r1,r3 @ ap=borrow?tp:rp |
| 153 |
| 154 .Lcopy: ldr r7,[r1],#4 @ copy or in-place refresh |
| 155 str sp,[r4],#4 @ zap tp |
| 156 str r7,[r2],#4 |
| 157 cmp r4,r0 |
| 158 bne .Lcopy |
| 159 |
| 160 add sp,r0,#4 @ skip over tp[num+1] |
| 161 ldmia sp!,{r4-r12,lr} @ restore registers |
| 162 add sp,sp,#2*4 @ skip over {r0,r2} |
| 163 mov r0,#1 |
| 164 .Labrt: tst lr,#1 |
| 165 moveq pc,lr @ be binary compatible with V4, yet |
| 166 .word 0xe12fff1e @ interoperable with Thumb ISA:-) |
| 167 .size bn_mul_mont,.-bn_mul_mont |
| 168 #if __ARM_ARCH__>=7 |
| 169 .fpu neon |
| 170 |
| 171 .type bn_mul8x_mont_neon,%function |
| 172 .align 5 |
| 173 bn_mul8x_mont_neon: |
| 174 mov ip,sp |
| 175 stmdb sp!,{r4-r11} |
| 176 vstmdb sp!,{d8-d15} @ ABI specification says so |
| 177 ldmia ip,{r4-r5} @ load rest of parameter block |
| 178 |
| 179 sub r7,sp,#16 |
| 180 vld1.32 {d28[0]}, [r2,:32]! |
| 181 sub r7,r7,r5,lsl#4 |
| 182 vld1.32 {d0-d3}, [r1]! @ can't specify :32 :-( |
| 183 and r7,r7,#-64 |
| 184 vld1.32 {d30[0]}, [r4,:32] |
| 185 mov sp,r7 @ alloca |
| 186 veor d8,d8,d8 |
| 187 subs r8,r5,#8 |
| 188 vzip.16 d28,d8 |
| 189 |
| 190 vmull.u32 q6,d28,d0[0] |
| 191 vmull.u32 q7,d28,d0[1] |
| 192 vmull.u32 q8,d28,d1[0] |
| 193 vshl.i64 d10,d13,#16 |
| 194 vmull.u32 q9,d28,d1[1] |
| 195 |
| 196 vadd.u64 d10,d10,d12 |
| 197 veor d8,d8,d8 |
| 198 vmul.u32 d29,d10,d30 |
| 199 |
| 200 vmull.u32 q10,d28,d2[0] |
| 201 vld1.32 {d4-d7}, [r3]! |
| 202 vmull.u32 q11,d28,d2[1] |
| 203 vmull.u32 q12,d28,d3[0] |
| 204 vzip.16 d29,d8 |
| 205 vmull.u32 q13,d28,d3[1] |
| 206 |
| 207 bne .LNEON_1st |
| 208 |
| 209 @ special case for num=8, everything is in register bank... |
| 210 |
| 211 vmlal.u32 q6,d29,d4[0] |
| 212 sub r9,r5,#1 |
| 213 vmlal.u32 q7,d29,d4[1] |
| 214 vmlal.u32 q8,d29,d5[0] |
| 215 vmlal.u32 q9,d29,d5[1] |
| 216 |
| 217 vmlal.u32 q10,d29,d6[0] |
| 218 vmov q5,q6 |
| 219 vmlal.u32 q11,d29,d6[1] |
| 220 vmov q6,q7 |
| 221 vmlal.u32 q12,d29,d7[0] |
| 222 vmov q7,q8 |
| 223 vmlal.u32 q13,d29,d7[1] |
| 224 vmov q8,q9 |
| 225 vmov q9,q10 |
| 226 vshr.u64 d10,d10,#16 |
| 227 vmov q10,q11 |
| 228 vmov q11,q12 |
| 229 vadd.u64 d10,d10,d11 |
| 230 vmov q12,q13 |
| 231 veor q13,q13 |
| 232 vshr.u64 d10,d10,#16 |
| 233 |
| 234 b .LNEON_outer8 |
| 235 |
| 236 .align 4 |
| 237 .LNEON_outer8: |
| 238 vld1.32 {d28[0]}, [r2,:32]! |
| 239 veor d8,d8,d8 |
| 240 vzip.16 d28,d8 |
| 241 vadd.u64 d12,d12,d10 |
| 242 |
| 243 vmlal.u32 q6,d28,d0[0] |
| 244 vmlal.u32 q7,d28,d0[1] |
| 245 vmlal.u32 q8,d28,d1[0] |
| 246 vshl.i64 d10,d13,#16 |
| 247 vmlal.u32 q9,d28,d1[1] |
| 248 |
| 249 vadd.u64 d10,d10,d12 |
| 250 veor d8,d8,d8 |
| 251 subs r9,r9,#1 |
| 252 vmul.u32 d29,d10,d30 |
| 253 |
| 254 vmlal.u32 q10,d28,d2[0] |
| 255 vmlal.u32 q11,d28,d2[1] |
| 256 vmlal.u32 q12,d28,d3[0] |
| 257 vzip.16 d29,d8 |
| 258 vmlal.u32 q13,d28,d3[1] |
| 259 |
| 260 vmlal.u32 q6,d29,d4[0] |
| 261 vmlal.u32 q7,d29,d4[1] |
| 262 vmlal.u32 q8,d29,d5[0] |
| 263 vmlal.u32 q9,d29,d5[1] |
| 264 |
| 265 vmlal.u32 q10,d29,d6[0] |
| 266 vmov q5,q6 |
| 267 vmlal.u32 q11,d29,d6[1] |
| 268 vmov q6,q7 |
| 269 vmlal.u32 q12,d29,d7[0] |
| 270 vmov q7,q8 |
| 271 vmlal.u32 q13,d29,d7[1] |
| 272 vmov q8,q9 |
| 273 vmov q9,q10 |
| 274 vshr.u64 d10,d10,#16 |
| 275 vmov q10,q11 |
| 276 vmov q11,q12 |
| 277 vadd.u64 d10,d10,d11 |
| 278 vmov q12,q13 |
| 279 veor q13,q13 |
| 280 vshr.u64 d10,d10,#16 |
| 281 |
| 282 bne .LNEON_outer8 |
| 283 |
| 284 vadd.u64 d12,d12,d10 |
| 285 mov r7,sp |
| 286 vshr.u64 d10,d12,#16 |
| 287 mov r8,r5 |
| 288 vadd.u64 d13,d13,d10 |
| 289 add r6,sp,#16 |
| 290 vshr.u64 d10,d13,#16 |
| 291 vzip.16 d12,d13 |
| 292 |
| 293 b .LNEON_tail2 |
| 294 |
| 295 .align 4 |
| 296 .LNEON_1st: |
| 297 vmlal.u32 q6,d29,d4[0] |
| 298 vld1.32 {d0-d3}, [r1]! |
| 299 vmlal.u32 q7,d29,d4[1] |
| 300 subs r8,r8,#8 |
| 301 vmlal.u32 q8,d29,d5[0] |
| 302 vmlal.u32 q9,d29,d5[1] |
| 303 |
| 304 vmlal.u32 q10,d29,d6[0] |
| 305 vld1.32 {d4-d5}, [r3]! |
| 306 vmlal.u32 q11,d29,d6[1] |
| 307 vst1.64 {q6-q7}, [r7,:256]! |
| 308 vmlal.u32 q12,d29,d7[0] |
| 309 vmlal.u32 q13,d29,d7[1] |
| 310 vst1.64 {q8-q9}, [r7,:256]! |
| 311 |
| 312 vmull.u32 q6,d28,d0[0] |
| 313 vld1.32 {d6-d7}, [r3]! |
| 314 vmull.u32 q7,d28,d0[1] |
| 315 vst1.64 {q10-q11}, [r7,:256]! |
| 316 vmull.u32 q8,d28,d1[0] |
| 317 vmull.u32 q9,d28,d1[1] |
| 318 vst1.64 {q12-q13}, [r7,:256]! |
| 319 |
| 320 vmull.u32 q10,d28,d2[0] |
| 321 vmull.u32 q11,d28,d2[1] |
| 322 vmull.u32 q12,d28,d3[0] |
| 323 vmull.u32 q13,d28,d3[1] |
| 324 |
| 325 bne .LNEON_1st |
| 326 |
| 327 vmlal.u32 q6,d29,d4[0] |
| 328 add r6,sp,#16 |
| 329 vmlal.u32 q7,d29,d4[1] |
| 330 sub r1,r1,r5,lsl#2 @ rewind r1 |
| 331 vmlal.u32 q8,d29,d5[0] |
| 332 vld1.64 {q5}, [sp,:128] |
| 333 vmlal.u32 q9,d29,d5[1] |
| 334 sub r9,r5,#1 |
| 335 |
| 336 vmlal.u32 q10,d29,d6[0] |
| 337 vst1.64 {q6-q7}, [r7,:256]! |
| 338 vmlal.u32 q11,d29,d6[1] |
| 339 vshr.u64 d10,d10,#16 |
| 340 vld1.64 {q6}, [r6, :128]! |
| 341 vmlal.u32 q12,d29,d7[0] |
| 342 vst1.64 {q8-q9}, [r7,:256]! |
| 343 vmlal.u32 q13,d29,d7[1] |
| 344 |
| 345 vst1.64 {q10-q11}, [r7,:256]! |
| 346 vadd.u64 d10,d10,d11 |
| 347 veor q4,q4,q4 |
| 348 vst1.64 {q12-q13}, [r7,:256]! |
| 349 vld1.64 {q7-q8}, [r6, :256]! |
| 350 vst1.64 {q4}, [r7,:128] |
| 351 vshr.u64 d10,d10,#16 |
| 352 |
| 353 b .LNEON_outer |
| 354 |
| 355 .align 4 |
| 356 .LNEON_outer: |
| 357 vld1.32 {d28[0]}, [r2,:32]! |
| 358 sub r3,r3,r5,lsl#2 @ rewind r3 |
| 359 vld1.32 {d0-d3}, [r1]! |
| 360 veor d8,d8,d8 |
| 361 mov r7,sp |
| 362 vzip.16 d28,d8 |
| 363 sub r8,r5,#8 |
| 364 vadd.u64 d12,d12,d10 |
| 365 |
| 366 vmlal.u32 q6,d28,d0[0] |
| 367 vld1.64 {q9-q10},[r6,:256]! |
| 368 vmlal.u32 q7,d28,d0[1] |
| 369 vmlal.u32 q8,d28,d1[0] |
| 370 vld1.64 {q11-q12},[r6,:256]! |
| 371 vmlal.u32 q9,d28,d1[1] |
| 372 |
| 373 vshl.i64 d10,d13,#16 |
| 374 veor d8,d8,d8 |
| 375 vadd.u64 d10,d10,d12 |
| 376 vld1.64 {q13},[r6,:128]! |
| 377 vmul.u32 d29,d10,d30 |
| 378 |
| 379 vmlal.u32 q10,d28,d2[0] |
| 380 vld1.32 {d4-d7}, [r3]! |
| 381 vmlal.u32 q11,d28,d2[1] |
| 382 vmlal.u32 q12,d28,d3[0] |
| 383 vzip.16 d29,d8 |
| 384 vmlal.u32 q13,d28,d3[1] |
| 385 |
| 386 .LNEON_inner: |
| 387 vmlal.u32 q6,d29,d4[0] |
| 388 vld1.32 {d0-d3}, [r1]! |
| 389 vmlal.u32 q7,d29,d4[1] |
| 390 subs r8,r8,#8 |
| 391 vmlal.u32 q8,d29,d5[0] |
| 392 vmlal.u32 q9,d29,d5[1] |
| 393 vst1.64 {q6-q7}, [r7,:256]! |
| 394 |
| 395 vmlal.u32 q10,d29,d6[0] |
| 396 vld1.64 {q6}, [r6, :128]! |
| 397 vmlal.u32 q11,d29,d6[1] |
| 398 vst1.64 {q8-q9}, [r7,:256]! |
| 399 vmlal.u32 q12,d29,d7[0] |
| 400 vld1.64 {q7-q8}, [r6, :256]! |
| 401 vmlal.u32 q13,d29,d7[1] |
| 402 vst1.64 {q10-q11}, [r7,:256]! |
| 403 |
| 404 vmlal.u32 q6,d28,d0[0] |
| 405 vld1.64 {q9-q10}, [r6, :256]! |
| 406 vmlal.u32 q7,d28,d0[1] |
| 407 vst1.64 {q12-q13}, [r7,:256]! |
| 408 vmlal.u32 q8,d28,d1[0] |
| 409 vld1.64 {q11-q12}, [r6, :256]! |
| 410 vmlal.u32 q9,d28,d1[1] |
| 411 vld1.32 {d4-d7}, [r3]! |
| 412 |
| 413 vmlal.u32 q10,d28,d2[0] |
| 414 vld1.64 {q13}, [r6, :128]! |
| 415 vmlal.u32 q11,d28,d2[1] |
| 416 vmlal.u32 q12,d28,d3[0] |
| 417 vmlal.u32 q13,d28,d3[1] |
| 418 |
| 419 bne .LNEON_inner |
| 420 |
| 421 vmlal.u32 q6,d29,d4[0] |
| 422 add r6,sp,#16 |
| 423 vmlal.u32 q7,d29,d4[1] |
| 424 sub r1,r1,r5,lsl#2 @ rewind r1 |
| 425 vmlal.u32 q8,d29,d5[0] |
| 426 vld1.64 {q5}, [sp,:128] |
| 427 vmlal.u32 q9,d29,d5[1] |
| 428 subs r9,r9,#1 |
| 429 |
| 430 vmlal.u32 q10,d29,d6[0] |
| 431 vst1.64 {q6-q7}, [r7,:256]! |
| 432 vmlal.u32 q11,d29,d6[1] |
| 433 vld1.64 {q6}, [r6, :128]! |
| 434 vshr.u64 d10,d10,#16 |
| 435 vst1.64 {q8-q9}, [r7,:256]! |
| 436 vmlal.u32 q12,d29,d7[0] |
| 437 vld1.64 {q7-q8}, [r6, :256]! |
| 438 vmlal.u32 q13,d29,d7[1] |
| 439 |
| 440 vst1.64 {q10-q11}, [r7,:256]! |
| 441 vadd.u64 d10,d10,d11 |
| 442 vst1.64 {q12-q13}, [r7,:256]! |
| 443 vshr.u64 d10,d10,#16 |
| 444 |
| 445 bne .LNEON_outer |
| 446 |
| 447 mov r7,sp |
| 448 mov r8,r5 |
| 449 |
| 450 .LNEON_tail: |
| 451 vadd.u64 d12,d12,d10 |
| 452 vld1.64 {q9-q10}, [r6, :256]! |
| 453 vshr.u64 d10,d12,#16 |
| 454 vadd.u64 d13,d13,d10 |
| 455 vld1.64 {q11-q12}, [r6, :256]! |
| 456 vshr.u64 d10,d13,#16 |
| 457 vld1.64 {q13}, [r6, :128]! |
| 458 vzip.16 d12,d13 |
| 459 |
| 460 .LNEON_tail2: |
| 461 vadd.u64 d14,d14,d10 |
| 462 vst1.32 {d12[0]}, [r7, :32]! |
| 463 vshr.u64 d10,d14,#16 |
| 464 vadd.u64 d15,d15,d10 |
| 465 vshr.u64 d10,d15,#16 |
| 466 vzip.16 d14,d15 |
| 467 |
| 468 vadd.u64 d16,d16,d10 |
| 469 vst1.32 {d14[0]}, [r7, :32]! |
| 470 vshr.u64 d10,d16,#16 |
| 471 vadd.u64 d17,d17,d10 |
| 472 vshr.u64 d10,d17,#16 |
| 473 vzip.16 d16,d17 |
| 474 |
| 475 vadd.u64 d18,d18,d10 |
| 476 vst1.32 {d16[0]}, [r7, :32]! |
| 477 vshr.u64 d10,d18,#16 |
| 478 vadd.u64 d19,d19,d10 |
| 479 vshr.u64 d10,d19,#16 |
| 480 vzip.16 d18,d19 |
| 481 |
| 482 vadd.u64 d20,d20,d10 |
| 483 vst1.32 {d18[0]}, [r7, :32]! |
| 484 vshr.u64 d10,d20,#16 |
| 485 vadd.u64 d21,d21,d10 |
| 486 vshr.u64 d10,d21,#16 |
| 487 vzip.16 d20,d21 |
| 488 |
| 489 vadd.u64 d22,d22,d10 |
| 490 vst1.32 {d20[0]}, [r7, :32]! |
| 491 vshr.u64 d10,d22,#16 |
| 492 vadd.u64 d23,d23,d10 |
| 493 vshr.u64 d10,d23,#16 |
| 494 vzip.16 d22,d23 |
| 495 |
| 496 vadd.u64 d24,d24,d10 |
| 497 vst1.32 {d22[0]}, [r7, :32]! |
| 498 vshr.u64 d10,d24,#16 |
| 499 vadd.u64 d25,d25,d10 |
| 500 vld1.64 {q6}, [r6, :128]! |
| 501 vshr.u64 d10,d25,#16 |
| 502 vzip.16 d24,d25 |
| 503 |
| 504 vadd.u64 d26,d26,d10 |
| 505 vst1.32 {d24[0]}, [r7, :32]! |
| 506 vshr.u64 d10,d26,#16 |
| 507 vadd.u64 d27,d27,d10 |
| 508 vld1.64 {q7-q8}, [r6, :256]! |
| 509 vshr.u64 d10,d27,#16 |
| 510 vzip.16 d26,d27 |
| 511 subs r8,r8,#8 |
| 512 vst1.32 {d26[0]}, [r7, :32]! |
| 513 |
| 514 bne .LNEON_tail |
| 515 |
| 516 vst1.32 {d10[0]}, [r7, :32] @ top-most bit |
| 517 sub r3,r3,r5,lsl#2 @ rewind r3 |
| 518 subs r1,sp,#0 @ clear carry flag |
| 519 add r2,sp,r5,lsl#2 |
| 520 |
| 521 .LNEON_sub: |
| 522 ldmia r1!, {r4-r7} |
| 523 ldmia r3!, {r8-r11} |
| 524 sbcs r8, r4,r8 |
| 525 sbcs r9, r5,r9 |
| 526 sbcs r10,r6,r10 |
| 527 sbcs r11,r7,r11 |
| 528 teq r1,r2 @ preserves carry |
| 529 stmia r0!, {r8-r11} |
| 530 bne .LNEON_sub |
| 531 |
| 532 ldr r10, [r1] @ load top-most bit |
| 533 veor q0,q0,q0 |
| 534 sub r11,r2,sp @ this is num*4 |
| 535 veor q1,q1,q1 |
| 536 mov r1,sp |
| 537 sub r0,r0,r11 @ rewind r0 |
| 538 mov r3,r2 @ second 3/4th of frame |
| 539 sbcs r10,r10,#0 @ result is carry flag |
| 540 |
| 541 .LNEON_copy_n_zap: |
| 542 ldmia r1!, {r4-r7} |
| 543 ldmia r0, {r8-r11} |
| 544 movcc r8, r4 |
| 545 vst1.64 {q0-q1}, [r3,:256]! @ wipe |
| 546 movcc r9, r5 |
| 547 movcc r10,r6 |
| 548 vst1.64 {q0-q1}, [r3,:256]! @ wipe |
| 549 movcc r11,r7 |
| 550 ldmia r1, {r4-r7} |
| 551 stmia r0!, {r8-r11} |
| 552 sub r1,r1,#16 |
| 553 ldmia r0, {r8-r11} |
| 554 movcc r8, r4 |
| 555 vst1.64 {q0-q1}, [r1,:256]! @ wipe |
| 556 movcc r9, r5 |
| 557 movcc r10,r6 |
| 558 vst1.64 {q0-q1}, [r3,:256]! @ wipe |
| 559 movcc r11,r7 |
| 560 teq r1,r2 @ preserves carry |
| 561 stmia r0!, {r8-r11} |
| 562 bne .LNEON_copy_n_zap |
| 563 |
| 564 sub sp,ip,#96 |
| 565 vldmia sp!,{d8-d15} |
| 566 ldmia sp!,{r4-r11} |
| 567 .word 0xe12fff1e |
| 568 .size bn_mul8x_mont_neon,.-bn_mul8x_mont_neon |
| 569 #endif |
| 570 .asciz "Montgomery multiplication for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>" |
| 571 .align 2 |
| 572 #if __ARM_ARCH__>=7 |
| 573 .comm OPENSSL_armcap_P,4,4 |
| 574 #endif |
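
For reference, below is a minimal C sketch (not part of the patch) of the word-serial Montgomery multiplication that the integer-only bn_mul_mont path above implements: rp = ap*bp*R^(-1) mod np with R = 2^(32*num) and n0 = -np[0]^(-1) mod 2^32, following the fused .L1st/.Linner accumulation, the tp[num-1]=/tp[num]= tail stores, and the .Lsub/.Lcopy conditional final subtraction. The function name mont_mul_ref, the fixed 128-word bound, and the scratch zeroing are illustrative assumptions, not OpenSSL API.

#include <stdint.h>
#include <string.h>

/* Illustrative reference only; mirrors the loop structure of the assembly above. */
static int mont_mul_ref(uint32_t *rp, const uint32_t *ap, const uint32_t *bp,
                        const uint32_t *np, uint32_t n0, int num)
{
    uint32_t tp[130] = {0};              /* tp[num+1] scratch, like alloca(4*num)+4 */

    if (num < 2 || num > 128)            /* the assembly bails out below num=2;   */
        return 0;                        /* the upper bound is just for this sketch */

    for (int i = 0; i < num; i++) {
        /* first column fixes the reduction multiplier m, as in "tp[0]"*n0 */
        uint64_t c1 = (uint64_t)tp[0] + (uint64_t)ap[0] * bp[i];
        uint32_t m  = (uint32_t)c1 * n0;
        uint64_t c2 = (uint32_t)c1 + (uint64_t)np[0] * m;  /* low word becomes 0 */
        c1 >>= 32;
        c2 >>= 32;

        for (int j = 1; j < num; j++) {  /* fused ap[j]*bp[i] and np[j]*m columns */
            c1 += (uint64_t)tp[j] + (uint64_t)ap[j] * bp[i];
            c2 += (uint32_t)c1 + (uint64_t)np[j] * m;
            tp[j - 1] = (uint32_t)c2;    /* tp[j-1]=, as in .Linner */
            c1 >>= 32;
            c2 >>= 32;
        }
        c2 += c1 + tp[num];              /* top two words: tp[num-1]= and tp[num]= */
        tp[num - 1] = (uint32_t)c2;
        tp[num]     = (uint32_t)(c2 >> 32);
    }

    /* conditional final subtraction: rp = (tp >= np) ? tp - np : tp */
    uint32_t borrow = 0;
    for (int j = 0; j < num; j++) {
        uint64_t d = (uint64_t)tp[j] - np[j] - borrow;
        rp[j]  = (uint32_t)d;
        borrow = (uint32_t)(d >> 32) & 1;
    }
    if (tp[num] < borrow)                /* subtraction underflowed: keep tp */
        memcpy(rp, tp, (size_t)num * sizeof(uint32_t));

    memset(tp, 0, sizeof(tp));           /* zap the scratch, like the .Lcopy loop */
    return 1;
}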