OLD | NEW |
(Empty) | |
| 1 #if defined(__arm__) |
| 2 #include "arm_arch.h" |
| 3 |
| 4 .text |
| 5 .code 32 |
| 6 |
| 7 .type K256,%object |
| 8 .align 5 |
| 9 K256: |
| 10 .word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 |
| 11 .word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 |
| 12 .word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 |
| 13 .word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 |
| 14 .word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc |
| 15 .word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da |
| 16 .word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 |
| 17 .word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 |
| 18 .word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 |
| 19 .word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 |
| 20 .word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 |
| 21 .word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 |
| 22 .word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 |
| 23 .word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 |
| 24 .word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 |
| 25 .word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 |
| 26 .size K256,.-K256 |
| 27 .word 0 @ terminator |
| 28 .LOPENSSL_armcap: |
| 29 .word OPENSSL_armcap_P-sha256_block_data_order |
| 30 .align 5 |
| 31 |
| 32 .global sha256_block_data_order |
| 33 .type sha256_block_data_order,%function |
| 34 sha256_block_data_order: |
| 35 sub r3,pc,#8 @ sha256_block_data_order |
| 36 add r2,r1,r2,lsl#6 @ len to point at the end of inp |
| 37 #if __ARM_ARCH__>=7 |
| 38 ldr r12,.LOPENSSL_armcap |
| 39 ldr r12,[r3,r12] @ OPENSSL_armcap_P |
| 40 tst r12,#1 |
| 41 bne .LNEON |
| 42 #endif |
| 43 stmdb sp!,{r0,r1,r2,r4-r11,lr} |
| 44 ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11} |
| 45 sub r14,r3,#256+32 @ K256 |
| 46 sub sp,sp,#16*4 @ alloca(X[16]) |
| 47 .Loop: |
| 48 # if __ARM_ARCH__>=7 |
| 49 ldr r2,[r1],#4 |
| 50 # else |
| 51 ldrb r2,[r1,#3] |
| 52 # endif |
| 53 eor r3,r5,r6 @ magic |
| 54 eor r12,r12,r12 |
| 55 #if __ARM_ARCH__>=7 |
| 56 @ ldr r2,[r1],#4 @ 0 |
| 57 # if 0==15 |
| 58 str r1,[sp,#17*4] @ make room for r1 |
| 59 # endif |
| 60 eor r0,r8,r8,ror#5 |
| 61 add r4,r4,r12 @ h+=Maj(a,b,c) from the past |
| 62 eor r0,r0,r8,ror#19 @ Sigma1(e) |
| 63 rev r2,r2 |
| 64 #else |
| 65 @ ldrb r2,[r1,#3] @ 0 |
| 66 add r4,r4,r12 @ h+=Maj(a,b,c) from the past |
| 67 ldrb r12,[r1,#2] |
| 68 ldrb r0,[r1,#1] |
| 69 orr r2,r2,r12,lsl#8 |
| 70 ldrb r12,[r1],#4 |
| 71 orr r2,r2,r0,lsl#16 |
| 72 # if 0==15 |
| 73 str r1,[sp,#17*4] @ make room for r1 |
| 74 # endif |
| 75 eor r0,r8,r8,ror#5 |
| 76 orr r2,r2,r12,lsl#24 |
| 77 eor r0,r0,r8,ror#19 @ Sigma1(e) |
| 78 #endif |
| 79 ldr r12,[r14],#4 @ *K256++ |
| 80 add r11,r11,r2 @ h+=X[i] |
| 81 str r2,[sp,#0*4] |
| 82 eor r2,r9,r10 |
| 83 add r11,r11,r0,ror#6 @ h+=Sigma1(e) |
| 84 and r2,r2,r8 |
| 85 add r11,r11,r12 @ h+=K256[i] |
| 86 eor r2,r2,r10 @ Ch(e,f,g) |
| 87 eor r0,r4,r4,ror#11 |
| 88 add r11,r11,r2 @ h+=Ch(e,f,g) |
| 89 #if 0==31 |
| 90 and r12,r12,#0xff |
| 91 cmp r12,#0xf2 @ done? |
| 92 #endif |
| 93 #if 0<15 |
| 94 # if __ARM_ARCH__>=7 |
| 95 ldr r2,[r1],#4 @ prefetch |
| 96 # else |
| 97 ldrb r2,[r1,#3] |
| 98 # endif |
| 99 eor r12,r4,r5 @ a^b, b^c in next round |
| 100 #else |
| 101 ldr r2,[sp,#2*4] @ from future BODY_16_xx |
| 102 eor r12,r4,r5 @ a^b, b^c in next round |
| 103 ldr r1,[sp,#15*4] @ from future BODY_16_xx |
| 104 #endif |
| 105 eor r0,r0,r4,ror#20 @ Sigma0(a) |
| 106 and r3,r3,r12 @ (b^c)&=(a^b) |
| 107 add r7,r7,r11 @ d+=h |
| 108 eor r3,r3,r5 @ Maj(a,b,c) |
| 109 add r11,r11,r0,ror#2 @ h+=Sigma0(a) |
| 110 @ add r11,r11,r3 @ h+=Maj(a,b,c) |
| 111 #if __ARM_ARCH__>=7 |
| 112 @ ldr r2,[r1],#4 @ 1 |
| 113 # if 1==15 |
| 114 str r1,[sp,#17*4] @ make room for r1 |
| 115 # endif |
| 116 eor r0,r7,r7,ror#5 |
| 117 add r11,r11,r3 @ h+=Maj(a,b,c) from the past |
| 118 eor r0,r0,r7,ror#19 @ Sigma1(e) |
| 119 rev r2,r2 |
| 120 #else |
| 121 @ ldrb r2,[r1,#3] @ 1 |
| 122 add r11,r11,r3 @ h+=Maj(a,b,c) from the past |
| 123 ldrb r3,[r1,#2] |
| 124 ldrb r0,[r1,#1] |
| 125 orr r2,r2,r3,lsl#8 |
| 126 ldrb r3,[r1],#4 |
| 127 orr r2,r2,r0,lsl#16 |
| 128 # if 1==15 |
| 129 str r1,[sp,#17*4] @ make room for r1 |
| 130 # endif |
| 131 eor r0,r7,r7,ror#5 |
| 132 orr r2,r2,r3,lsl#24 |
| 133 eor r0,r0,r7,ror#19 @ Sigma1(e) |
| 134 #endif |
| 135 ldr r3,[r14],#4 @ *K256++ |
| 136 add r10,r10,r2 @ h+=X[i] |
| 137 str r2,[sp,#1*4] |
| 138 eor r2,r8,r9 |
| 139 add r10,r10,r0,ror#6 @ h+=Sigma1(e) |
| 140 and r2,r2,r7 |
| 141 add r10,r10,r3 @ h+=K256[i] |
| 142 eor r2,r2,r9 @ Ch(e,f,g) |
| 143 eor r0,r11,r11,ror#11 |
| 144 add r10,r10,r2 @ h+=Ch(e,f,g) |
| 145 #if 1==31 |
| 146 and r3,r3,#0xff |
| 147 cmp r3,#0xf2 @ done? |
| 148 #endif |
| 149 #if 1<15 |
| 150 # if __ARM_ARCH__>=7 |
| 151 ldr r2,[r1],#4 @ prefetch |
| 152 # else |
| 153 ldrb r2,[r1,#3] |
| 154 # endif |
| 155 eor r3,r11,r4 @ a^b, b^c in next round |
| 156 #else |
| 157 ldr r2,[sp,#3*4] @ from future BODY_16_xx |
| 158 eor r3,r11,r4 @ a^b, b^c in next round |
| 159 ldr r1,[sp,#0*4] @ from future BODY_16_xx |
| 160 #endif |
| 161 eor r0,r0,r11,ror#20 @ Sigma0(a) |
| 162 and r12,r12,r3 @ (b^c)&=(a^b) |
| 163 add r6,r6,r10 @ d+=h |
| 164 eor r12,r12,r4 @ Maj(a,b,c) |
| 165 add r10,r10,r0,ror#2 @ h+=Sigma0(a) |
| 166 @ add r10,r10,r12 @ h+=Maj(a,b,c) |
| 167 #if __ARM_ARCH__>=7 |
| 168 @ ldr r2,[r1],#4 @ 2 |
| 169 # if 2==15 |
| 170 str r1,[sp,#17*4] @ make room for r1 |
| 171 # endif |
| 172 eor r0,r6,r6,ror#5 |
| 173 add r10,r10,r12 @ h+=Maj(a,b,c) from the past |
| 174 eor r0,r0,r6,ror#19 @ Sigma1(e) |
| 175 rev r2,r2 |
| 176 #else |
| 177 @ ldrb r2,[r1,#3] @ 2 |
| 178 add r10,r10,r12 @ h+=Maj(a,b,c) from the past |
| 179 ldrb r12,[r1,#2] |
| 180 ldrb r0,[r1,#1] |
| 181 orr r2,r2,r12,lsl#8 |
| 182 ldrb r12,[r1],#4 |
| 183 orr r2,r2,r0,lsl#16 |
| 184 # if 2==15 |
| 185 str r1,[sp,#17*4] @ make room for r1 |
| 186 # endif |
| 187 eor r0,r6,r6,ror#5 |
| 188 orr r2,r2,r12,lsl#24 |
| 189 eor r0,r0,r6,ror#19 @ Sigma1(e) |
| 190 #endif |
| 191 ldr r12,[r14],#4 @ *K256++ |
| 192 add r9,r9,r2 @ h+=X[i] |
| 193 str r2,[sp,#2*4] |
| 194 eor r2,r7,r8 |
| 195 add r9,r9,r0,ror#6 @ h+=Sigma1(e) |
| 196 and r2,r2,r6 |
| 197 add r9,r9,r12 @ h+=K256[i] |
| 198 eor r2,r2,r8 @ Ch(e,f,g) |
| 199 eor r0,r10,r10,ror#11 |
| 200 add r9,r9,r2 @ h+=Ch(e,f,g) |
| 201 #if 2==31 |
| 202 and r12,r12,#0xff |
| 203 cmp r12,#0xf2 @ done? |
| 204 #endif |
| 205 #if 2<15 |
| 206 # if __ARM_ARCH__>=7 |
| 207 ldr r2,[r1],#4 @ prefetch |
| 208 # else |
| 209 ldrb r2,[r1,#3] |
| 210 # endif |
| 211 eor r12,r10,r11 @ a^b, b^c in next round |
| 212 #else |
| 213 ldr r2,[sp,#4*4] @ from future BODY_16_xx |
| 214 eor r12,r10,r11 @ a^b, b^c in next round |
| 215 ldr r1,[sp,#1*4] @ from future BODY_16_xx |
| 216 #endif |
| 217 eor r0,r0,r10,ror#20 @ Sigma0(a) |
| 218 and r3,r3,r12 @ (b^c)&=(a^b) |
| 219 add r5,r5,r9 @ d+=h |
| 220 eor r3,r3,r11 @ Maj(a,b,c) |
| 221 add r9,r9,r0,ror#2 @ h+=Sigma0(a) |
| 222 @ add r9,r9,r3 @ h+=Maj(a,b,c) |
| 223 #if __ARM_ARCH__>=7 |
| 224 @ ldr r2,[r1],#4 @ 3 |
| 225 # if 3==15 |
| 226 str r1,[sp,#17*4] @ make room for r1 |
| 227 # endif |
| 228 eor r0,r5,r5,ror#5 |
| 229 add r9,r9,r3 @ h+=Maj(a,b,c) from the past |
| 230 eor r0,r0,r5,ror#19 @ Sigma1(e) |
| 231 rev r2,r2 |
| 232 #else |
| 233 @ ldrb r2,[r1,#3] @ 3 |
| 234 add r9,r9,r3 @ h+=Maj(a,b,c) from the past |
| 235 ldrb r3,[r1,#2] |
| 236 ldrb r0,[r1,#1] |
| 237 orr r2,r2,r3,lsl#8 |
| 238 ldrb r3,[r1],#4 |
| 239 orr r2,r2,r0,lsl#16 |
| 240 # if 3==15 |
| 241 str r1,[sp,#17*4] @ make room for r1 |
| 242 # endif |
| 243 eor r0,r5,r5,ror#5 |
| 244 orr r2,r2,r3,lsl#24 |
| 245 eor r0,r0,r5,ror#19 @ Sigma1(e) |
| 246 #endif |
| 247 ldr r3,[r14],#4 @ *K256++ |
| 248 add r8,r8,r2 @ h+=X[i] |
| 249 str r2,[sp,#3*4] |
| 250 eor r2,r6,r7 |
| 251 add r8,r8,r0,ror#6 @ h+=Sigma1(e) |
| 252 and r2,r2,r5 |
| 253 add r8,r8,r3 @ h+=K256[i] |
| 254 eor r2,r2,r7 @ Ch(e,f,g) |
| 255 eor r0,r9,r9,ror#11 |
| 256 add r8,r8,r2 @ h+=Ch(e,f,g) |
| 257 #if 3==31 |
| 258 and r3,r3,#0xff |
| 259 cmp r3,#0xf2 @ done? |
| 260 #endif |
| 261 #if 3<15 |
| 262 # if __ARM_ARCH__>=7 |
| 263 ldr r2,[r1],#4 @ prefetch |
| 264 # else |
| 265 ldrb r2,[r1,#3] |
| 266 # endif |
| 267 eor r3,r9,r10 @ a^b, b^c in next round |
| 268 #else |
| 269 ldr r2,[sp,#5*4] @ from future BODY_16_xx |
| 270 eor r3,r9,r10 @ a^b, b^c in next round |
| 271 ldr r1,[sp,#2*4] @ from future BODY_16_xx |
| 272 #endif |
| 273 eor r0,r0,r9,ror#20 @ Sigma0(a) |
| 274 and r12,r12,r3 @ (b^c)&=(a^b) |
| 275 add r4,r4,r8 @ d+=h |
| 276 eor r12,r12,r10 @ Maj(a,b,c) |
| 277 add r8,r8,r0,ror#2 @ h+=Sigma0(a) |
| 278 @ add r8,r8,r12 @ h+=Maj(a,b,c) |
| 279 #if __ARM_ARCH__>=7 |
| 280 @ ldr r2,[r1],#4 @ 4 |
| 281 # if 4==15 |
| 282 str r1,[sp,#17*4] @ make room for r1 |
| 283 # endif |
| 284 eor r0,r4,r4,ror#5 |
| 285 add r8,r8,r12 @ h+=Maj(a,b,c) from the past |
| 286 eor r0,r0,r4,ror#19 @ Sigma1(e) |
| 287 rev r2,r2 |
| 288 #else |
| 289 @ ldrb r2,[r1,#3] @ 4 |
| 290 add r8,r8,r12 @ h+=Maj(a,b,c) from the past |
| 291 ldrb r12,[r1,#2] |
| 292 ldrb r0,[r1,#1] |
| 293 orr r2,r2,r12,lsl#8 |
| 294 ldrb r12,[r1],#4 |
| 295 orr r2,r2,r0,lsl#16 |
| 296 # if 4==15 |
| 297 str r1,[sp,#17*4] @ make room for r1 |
| 298 # endif |
| 299 eor r0,r4,r4,ror#5 |
| 300 orr r2,r2,r12,lsl#24 |
| 301 eor r0,r0,r4,ror#19 @ Sigma1(e) |
| 302 #endif |
| 303 ldr r12,[r14],#4 @ *K256++ |
| 304 add r7,r7,r2 @ h+=X[i] |
| 305 str r2,[sp,#4*4] |
| 306 eor r2,r5,r6 |
| 307 add r7,r7,r0,ror#6 @ h+=Sigma1(e) |
| 308 and r2,r2,r4 |
| 309 add r7,r7,r12 @ h+=K256[i] |
| 310 eor r2,r2,r6 @ Ch(e,f,g) |
| 311 eor r0,r8,r8,ror#11 |
| 312 add r7,r7,r2 @ h+=Ch(e,f,g) |
| 313 #if 4==31 |
| 314 and r12,r12,#0xff |
| 315 cmp r12,#0xf2 @ done? |
| 316 #endif |
| 317 #if 4<15 |
| 318 # if __ARM_ARCH__>=7 |
| 319 ldr r2,[r1],#4 @ prefetch |
| 320 # else |
| 321 ldrb r2,[r1,#3] |
| 322 # endif |
| 323 eor r12,r8,r9 @ a^b, b^c in next round |
| 324 #else |
| 325 ldr r2,[sp,#6*4] @ from future BODY_16_xx |
| 326 eor r12,r8,r9 @ a^b, b^c in next round |
| 327 ldr r1,[sp,#3*4] @ from future BODY_16_xx |
| 328 #endif |
| 329 eor r0,r0,r8,ror#20 @ Sigma0(a) |
| 330 and r3,r3,r12 @ (b^c)&=(a^b) |
| 331 add r11,r11,r7 @ d+=h |
| 332 eor r3,r3,r9 @ Maj(a,b,c) |
| 333 add r7,r7,r0,ror#2 @ h+=Sigma0(a) |
| 334 @ add r7,r7,r3 @ h+=Maj(a,b,c) |
| 335 #if __ARM_ARCH__>=7 |
| 336 @ ldr r2,[r1],#4 @ 5 |
| 337 # if 5==15 |
| 338 str r1,[sp,#17*4] @ make room for r1 |
| 339 # endif |
| 340 eor r0,r11,r11,ror#5 |
| 341 add r7,r7,r3 @ h+=Maj(a,b,c) from the past |
| 342 eor r0,r0,r11,ror#19 @ Sigma1(e) |
| 343 rev r2,r2 |
| 344 #else |
| 345 @ ldrb r2,[r1,#3] @ 5 |
| 346 add r7,r7,r3 @ h+=Maj(a,b,c) from the past |
| 347 ldrb r3,[r1,#2] |
| 348 ldrb r0,[r1,#1] |
| 349 orr r2,r2,r3,lsl#8 |
| 350 ldrb r3,[r1],#4 |
| 351 orr r2,r2,r0,lsl#16 |
| 352 # if 5==15 |
| 353 str r1,[sp,#17*4] @ make room for r1 |
| 354 # endif |
| 355 eor r0,r11,r11,ror#5 |
| 356 orr r2,r2,r3,lsl#24 |
| 357 eor r0,r0,r11,ror#19 @ Sigma1(e) |
| 358 #endif |
| 359 ldr r3,[r14],#4 @ *K256++ |
| 360 add r6,r6,r2 @ h+=X[i] |
| 361 str r2,[sp,#5*4] |
| 362 eor r2,r4,r5 |
| 363 add r6,r6,r0,ror#6 @ h+=Sigma1(e) |
| 364 and r2,r2,r11 |
| 365 add r6,r6,r3 @ h+=K256[i] |
| 366 eor r2,r2,r5 @ Ch(e,f,g) |
| 367 eor r0,r7,r7,ror#11 |
| 368 add r6,r6,r2 @ h+=Ch(e,f,g) |
| 369 #if 5==31 |
| 370 and r3,r3,#0xff |
| 371 cmp r3,#0xf2 @ done? |
| 372 #endif |
| 373 #if 5<15 |
| 374 # if __ARM_ARCH__>=7 |
| 375 ldr r2,[r1],#4 @ prefetch |
| 376 # else |
| 377 ldrb r2,[r1,#3] |
| 378 # endif |
| 379 eor r3,r7,r8 @ a^b, b^c in next round |
| 380 #else |
| 381 ldr r2,[sp,#7*4] @ from future BODY_16_xx |
| 382 eor r3,r7,r8 @ a^b, b^c in next round |
| 383 ldr r1,[sp,#4*4] @ from future BODY_16_xx |
| 384 #endif |
| 385 eor r0,r0,r7,ror#20 @ Sigma0(a) |
| 386 and r12,r12,r3 @ (b^c)&=(a^b) |
| 387 add r10,r10,r6 @ d+=h |
| 388 eor r12,r12,r8 @ Maj(a,b,c) |
| 389 add r6,r6,r0,ror#2 @ h+=Sigma0(a) |
| 390 @ add r6,r6,r12 @ h+=Maj(a,b,c) |
| 391 #if __ARM_ARCH__>=7 |
| 392 @ ldr r2,[r1],#4 @ 6 |
| 393 # if 6==15 |
| 394 str r1,[sp,#17*4] @ make room for r1 |
| 395 # endif |
| 396 eor r0,r10,r10,ror#5 |
| 397 add r6,r6,r12 @ h+=Maj(a,b,c) from the past |
| 398 eor r0,r0,r10,ror#19 @ Sigma1(e) |
| 399 rev r2,r2 |
| 400 #else |
| 401 @ ldrb r2,[r1,#3] @ 6 |
| 402 add r6,r6,r12 @ h+=Maj(a,b,c) from the past |
| 403 ldrb r12,[r1,#2] |
| 404 ldrb r0,[r1,#1] |
| 405 orr r2,r2,r12,lsl#8 |
| 406 ldrb r12,[r1],#4 |
| 407 orr r2,r2,r0,lsl#16 |
| 408 # if 6==15 |
| 409 str r1,[sp,#17*4] @ make room for r1 |
| 410 # endif |
| 411 eor r0,r10,r10,ror#5 |
| 412 orr r2,r2,r12,lsl#24 |
| 413 eor r0,r0,r10,ror#19 @ Sigma1(e) |
| 414 #endif |
| 415 ldr r12,[r14],#4 @ *K256++ |
| 416 add r5,r5,r2 @ h+=X[i] |
| 417 str r2,[sp,#6*4] |
| 418 eor r2,r11,r4 |
| 419 add r5,r5,r0,ror#6 @ h+=Sigma1(e) |
| 420 and r2,r2,r10 |
| 421 add r5,r5,r12 @ h+=K256[i] |
| 422 eor r2,r2,r4 @ Ch(e,f,g) |
| 423 eor r0,r6,r6,ror#11 |
| 424 add r5,r5,r2 @ h+=Ch(e,f,g) |
| 425 #if 6==31 |
| 426 and r12,r12,#0xff |
| 427 cmp r12,#0xf2 @ done? |
| 428 #endif |
| 429 #if 6<15 |
| 430 # if __ARM_ARCH__>=7 |
| 431 ldr r2,[r1],#4 @ prefetch |
| 432 # else |
| 433 ldrb r2,[r1,#3] |
| 434 # endif |
| 435 eor r12,r6,r7 @ a^b, b^c in next round |
| 436 #else |
| 437 ldr r2,[sp,#8*4] @ from future BODY_16_xx |
| 438 eor r12,r6,r7 @ a^b, b^c in next round |
| 439 ldr r1,[sp,#5*4] @ from future BODY_16_xx |
| 440 #endif |
| 441 eor r0,r0,r6,ror#20 @ Sigma0(a) |
| 442 and r3,r3,r12 @ (b^c)&=(a^b) |
| 443 add r9,r9,r5 @ d+=h |
| 444 eor r3,r3,r7 @ Maj(a,b,c) |
| 445 add r5,r5,r0,ror#2 @ h+=Sigma0(a) |
| 446 @ add r5,r5,r3 @ h+=Maj(a,b,c) |
| 447 #if __ARM_ARCH__>=7 |
| 448 @ ldr r2,[r1],#4 @ 7 |
| 449 # if 7==15 |
| 450 str r1,[sp,#17*4] @ make room for r1 |
| 451 # endif |
| 452 eor r0,r9,r9,ror#5 |
| 453 add r5,r5,r3 @ h+=Maj(a,b,c) from the past |
| 454 eor r0,r0,r9,ror#19 @ Sigma1(e) |
| 455 rev r2,r2 |
| 456 #else |
| 457 @ ldrb r2,[r1,#3] @ 7 |
| 458 add r5,r5,r3 @ h+=Maj(a,b,c) from the past |
| 459 ldrb r3,[r1,#2] |
| 460 ldrb r0,[r1,#1] |
| 461 orr r2,r2,r3,lsl#8 |
| 462 ldrb r3,[r1],#4 |
| 463 orr r2,r2,r0,lsl#16 |
| 464 # if 7==15 |
| 465 str r1,[sp,#17*4] @ make room for r1 |
| 466 # endif |
| 467 eor r0,r9,r9,ror#5 |
| 468 orr r2,r2,r3,lsl#24 |
| 469 eor r0,r0,r9,ror#19 @ Sigma1(e) |
| 470 #endif |
| 471 ldr r3,[r14],#4 @ *K256++ |
| 472 add r4,r4,r2 @ h+=X[i] |
| 473 str r2,[sp,#7*4] |
| 474 eor r2,r10,r11 |
| 475 add r4,r4,r0,ror#6 @ h+=Sigma1(e) |
| 476 and r2,r2,r9 |
| 477 add r4,r4,r3 @ h+=K256[i] |
| 478 eor r2,r2,r11 @ Ch(e,f,g) |
| 479 eor r0,r5,r5,ror#11 |
| 480 add r4,r4,r2 @ h+=Ch(e,f,g) |
| 481 #if 7==31 |
| 482 and r3,r3,#0xff |
| 483 cmp r3,#0xf2 @ done? |
| 484 #endif |
| 485 #if 7<15 |
| 486 # if __ARM_ARCH__>=7 |
| 487 ldr r2,[r1],#4 @ prefetch |
| 488 # else |
| 489 ldrb r2,[r1,#3] |
| 490 # endif |
| 491 eor r3,r5,r6 @ a^b, b^c in next round |
| 492 #else |
| 493 ldr r2,[sp,#9*4] @ from future BODY_16_xx |
| 494 eor r3,r5,r6 @ a^b, b^c in next round |
| 495 ldr r1,[sp,#6*4] @ from future BODY_16_xx |
| 496 #endif |
| 497 eor r0,r0,r5,ror#20 @ Sigma0(a) |
| 498 and r12,r12,r3 @ (b^c)&=(a^b) |
| 499 add r8,r8,r4 @ d+=h |
| 500 eor r12,r12,r6 @ Maj(a,b,c) |
| 501 add r4,r4,r0,ror#2 @ h+=Sigma0(a) |
| 502 @ add r4,r4,r12 @ h+=Maj(a,b,c) |
| 503 #if __ARM_ARCH__>=7 |
| 504 @ ldr r2,[r1],#4 @ 8 |
| 505 # if 8==15 |
| 506 str r1,[sp,#17*4] @ make room for r1 |
| 507 # endif |
| 508 eor r0,r8,r8,ror#5 |
| 509 add r4,r4,r12 @ h+=Maj(a,b,c) from the past |
| 510 eor r0,r0,r8,ror#19 @ Sigma1(e) |
| 511 rev r2,r2 |
| 512 #else |
| 513 @ ldrb r2,[r1,#3] @ 8 |
| 514 add r4,r4,r12 @ h+=Maj(a,b,c) from the past |
| 515 ldrb r12,[r1,#2] |
| 516 ldrb r0,[r1,#1] |
| 517 orr r2,r2,r12,lsl#8 |
| 518 ldrb r12,[r1],#4 |
| 519 orr r2,r2,r0,lsl#16 |
| 520 # if 8==15 |
| 521 str r1,[sp,#17*4] @ make room for r1 |
| 522 # endif |
| 523 eor r0,r8,r8,ror#5 |
| 524 orr r2,r2,r12,lsl#24 |
| 525 eor r0,r0,r8,ror#19 @ Sigma1(e) |
| 526 #endif |
| 527 ldr r12,[r14],#4 @ *K256++ |
| 528 add r11,r11,r2 @ h+=X[i] |
| 529 str r2,[sp,#8*4] |
| 530 eor r2,r9,r10 |
| 531 add r11,r11,r0,ror#6 @ h+=Sigma1(e) |
| 532 and r2,r2,r8 |
| 533 add r11,r11,r12 @ h+=K256[i] |
| 534 eor r2,r2,r10 @ Ch(e,f,g) |
| 535 eor r0,r4,r4,ror#11 |
| 536 add r11,r11,r2 @ h+=Ch(e,f,g) |
| 537 #if 8==31 |
| 538 and r12,r12,#0xff |
| 539 cmp r12,#0xf2 @ done? |
| 540 #endif |
| 541 #if 8<15 |
| 542 # if __ARM_ARCH__>=7 |
| 543 ldr r2,[r1],#4 @ prefetch |
| 544 # else |
| 545 ldrb r2,[r1,#3] |
| 546 # endif |
| 547 eor r12,r4,r5 @ a^b, b^c in next round |
| 548 #else |
| 549 ldr r2,[sp,#10*4] @ from future BODY_16_xx |
| 550 eor r12,r4,r5 @ a^b, b^c in next round |
| 551 ldr r1,[sp,#7*4] @ from future BODY_16_xx |
| 552 #endif |
| 553 eor r0,r0,r4,ror#20 @ Sigma0(a) |
| 554 and r3,r3,r12 @ (b^c)&=(a^b) |
| 555 add r7,r7,r11 @ d+=h |
| 556 eor r3,r3,r5 @ Maj(a,b,c) |
| 557 add r11,r11,r0,ror#2 @ h+=Sigma0(a) |
| 558 @ add r11,r11,r3 @ h+=Maj(a,b,c) |
| 559 #if __ARM_ARCH__>=7 |
| 560 @ ldr r2,[r1],#4 @ 9 |
| 561 # if 9==15 |
| 562 str r1,[sp,#17*4] @ make room for r1 |
| 563 # endif |
| 564 eor r0,r7,r7,ror#5 |
| 565 add r11,r11,r3 @ h+=Maj(a,b,c) from the past |
| 566 eor r0,r0,r7,ror#19 @ Sigma1(e) |
| 567 rev r2,r2 |
| 568 #else |
| 569 @ ldrb r2,[r1,#3] @ 9 |
| 570 add r11,r11,r3 @ h+=Maj(a,b,c) from the past |
| 571 ldrb r3,[r1,#2] |
| 572 ldrb r0,[r1,#1] |
| 573 orr r2,r2,r3,lsl#8 |
| 574 ldrb r3,[r1],#4 |
| 575 orr r2,r2,r0,lsl#16 |
| 576 # if 9==15 |
| 577 str r1,[sp,#17*4] @ make room for r1 |
| 578 # endif |
| 579 eor r0,r7,r7,ror#5 |
| 580 orr r2,r2,r3,lsl#24 |
| 581 eor r0,r0,r7,ror#19 @ Sigma1(e) |
| 582 #endif |
| 583 ldr r3,[r14],#4 @ *K256++ |
| 584 add r10,r10,r2 @ h+=X[i] |
| 585 str r2,[sp,#9*4] |
| 586 eor r2,r8,r9 |
| 587 add r10,r10,r0,ror#6 @ h+=Sigma1(e) |
| 588 and r2,r2,r7 |
| 589 add r10,r10,r3 @ h+=K256[i] |
| 590 eor r2,r2,r9 @ Ch(e,f,g) |
| 591 eor r0,r11,r11,ror#11 |
| 592 add r10,r10,r2 @ h+=Ch(e,f,g) |
| 593 #if 9==31 |
| 594 and r3,r3,#0xff |
| 595 cmp r3,#0xf2 @ done? |
| 596 #endif |
| 597 #if 9<15 |
| 598 # if __ARM_ARCH__>=7 |
| 599 ldr r2,[r1],#4 @ prefetch |
| 600 # else |
| 601 ldrb r2,[r1,#3] |
| 602 # endif |
| 603 eor r3,r11,r4 @ a^b, b^c in next round |
| 604 #else |
| 605 ldr r2,[sp,#11*4] @ from future BODY_16_xx |
| 606 eor r3,r11,r4 @ a^b, b^c in next round |
| 607 ldr r1,[sp,#8*4] @ from future BODY_16_xx |
| 608 #endif |
| 609 eor r0,r0,r11,ror#20 @ Sigma0(a) |
| 610 and r12,r12,r3 @ (b^c)&=(a^b) |
| 611 add r6,r6,r10 @ d+=h |
| 612 eor r12,r12,r4 @ Maj(a,b,c) |
| 613 add r10,r10,r0,ror#2 @ h+=Sigma0(a) |
| 614 @ add r10,r10,r12 @ h+=Maj(a,b,c) |
| 615 #if __ARM_ARCH__>=7 |
| 616 @ ldr r2,[r1],#4 @ 10 |
| 617 # if 10==15 |
| 618 str r1,[sp,#17*4] @ make room for r1 |
| 619 # endif |
| 620 eor r0,r6,r6,ror#5 |
| 621 add r10,r10,r12 @ h+=Maj(a,b,c) from the past |
| 622 eor r0,r0,r6,ror#19 @ Sigma1(e) |
| 623 rev r2,r2 |
| 624 #else |
| 625 @ ldrb r2,[r1,#3] @ 10 |
| 626 add r10,r10,r12 @ h+=Maj(a,b,c) from the past |
| 627 ldrb r12,[r1,#2] |
| 628 ldrb r0,[r1,#1] |
| 629 orr r2,r2,r12,lsl#8 |
| 630 ldrb r12,[r1],#4 |
| 631 orr r2,r2,r0,lsl#16 |
| 632 # if 10==15 |
| 633 str r1,[sp,#17*4] @ make room for r1 |
| 634 # endif |
| 635 eor r0,r6,r6,ror#5 |
| 636 orr r2,r2,r12,lsl#24 |
| 637 eor r0,r0,r6,ror#19 @ Sigma1(e) |
| 638 #endif |
| 639 ldr r12,[r14],#4 @ *K256++ |
| 640 add r9,r9,r2 @ h+=X[i] |
| 641 str r2,[sp,#10*4] |
| 642 eor r2,r7,r8 |
| 643 add r9,r9,r0,ror#6 @ h+=Sigma1(e) |
| 644 and r2,r2,r6 |
| 645 add r9,r9,r12 @ h+=K256[i] |
| 646 eor r2,r2,r8 @ Ch(e,f,g) |
| 647 eor r0,r10,r10,ror#11 |
| 648 add r9,r9,r2 @ h+=Ch(e,f,g) |
| 649 #if 10==31 |
| 650 and r12,r12,#0xff |
| 651 cmp r12,#0xf2 @ done? |
| 652 #endif |
| 653 #if 10<15 |
| 654 # if __ARM_ARCH__>=7 |
| 655 ldr r2,[r1],#4 @ prefetch |
| 656 # else |
| 657 ldrb r2,[r1,#3] |
| 658 # endif |
| 659 eor r12,r10,r11 @ a^b, b^c in next round |
| 660 #else |
| 661 ldr r2,[sp,#12*4] @ from future BODY_16_xx |
| 662 eor r12,r10,r11 @ a^b, b^c in next round |
| 663 ldr r1,[sp,#9*4] @ from future BODY_16_xx |
| 664 #endif |
| 665 eor r0,r0,r10,ror#20 @ Sigma0(a) |
| 666 and r3,r3,r12 @ (b^c)&=(a^b) |
| 667 add r5,r5,r9 @ d+=h |
| 668 eor r3,r3,r11 @ Maj(a,b,c) |
| 669 add r9,r9,r0,ror#2 @ h+=Sigma0(a) |
| 670 @ add r9,r9,r3 @ h+=Maj(a,b,c) |
| 671 #if __ARM_ARCH__>=7 |
| 672 @ ldr r2,[r1],#4 @ 11 |
| 673 # if 11==15 |
| 674 str r1,[sp,#17*4] @ make room for r1 |
| 675 # endif |
| 676 eor r0,r5,r5,ror#5 |
| 677 add r9,r9,r3 @ h+=Maj(a,b,c) from the past |
| 678 eor r0,r0,r5,ror#19 @ Sigma1(e) |
| 679 rev r2,r2 |
| 680 #else |
| 681 @ ldrb r2,[r1,#3] @ 11 |
| 682 add r9,r9,r3 @ h+=Maj(a,b,c) from the past |
| 683 ldrb r3,[r1,#2] |
| 684 ldrb r0,[r1,#1] |
| 685 orr r2,r2,r3,lsl#8 |
| 686 ldrb r3,[r1],#4 |
| 687 orr r2,r2,r0,lsl#16 |
| 688 # if 11==15 |
| 689 str r1,[sp,#17*4] @ make room for r1 |
| 690 # endif |
| 691 eor r0,r5,r5,ror#5 |
| 692 orr r2,r2,r3,lsl#24 |
| 693 eor r0,r0,r5,ror#19 @ Sigma1(e) |
| 694 #endif |
| 695 ldr r3,[r14],#4 @ *K256++ |
| 696 add r8,r8,r2 @ h+=X[i] |
| 697 str r2,[sp,#11*4] |
| 698 eor r2,r6,r7 |
| 699 add r8,r8,r0,ror#6 @ h+=Sigma1(e) |
| 700 and r2,r2,r5 |
| 701 add r8,r8,r3 @ h+=K256[i] |
| 702 eor r2,r2,r7 @ Ch(e,f,g) |
| 703 eor r0,r9,r9,ror#11 |
| 704 add r8,r8,r2 @ h+=Ch(e,f,g) |
| 705 #if 11==31 |
| 706 and r3,r3,#0xff |
| 707 cmp r3,#0xf2 @ done? |
| 708 #endif |
| 709 #if 11<15 |
| 710 # if __ARM_ARCH__>=7 |
| 711 ldr r2,[r1],#4 @ prefetch |
| 712 # else |
| 713 ldrb r2,[r1,#3] |
| 714 # endif |
| 715 eor r3,r9,r10 @ a^b, b^c in next round |
| 716 #else |
| 717 ldr r2,[sp,#13*4] @ from future BODY_16_xx |
| 718 eor r3,r9,r10 @ a^b, b^c in next round |
| 719 ldr r1,[sp,#10*4] @ from future BODY_16_xx |
| 720 #endif |
| 721 eor r0,r0,r9,ror#20 @ Sigma0(a) |
| 722 and r12,r12,r3 @ (b^c)&=(a^b) |
| 723 add r4,r4,r8 @ d+=h |
| 724 eor r12,r12,r10 @ Maj(a,b,c) |
| 725 add r8,r8,r0,ror#2 @ h+=Sigma0(a) |
| 726 @ add r8,r8,r12 @ h+=Maj(a,b,c) |
| 727 #if __ARM_ARCH__>=7 |
| 728 @ ldr r2,[r1],#4 @ 12 |
| 729 # if 12==15 |
| 730 str r1,[sp,#17*4] @ make room for r1 |
| 731 # endif |
| 732 eor r0,r4,r4,ror#5 |
| 733 add r8,r8,r12 @ h+=Maj(a,b,c) from the past |
| 734 eor r0,r0,r4,ror#19 @ Sigma1(e) |
| 735 rev r2,r2 |
| 736 #else |
| 737 @ ldrb r2,[r1,#3] @ 12 |
| 738 add r8,r8,r12 @ h+=Maj(a,b,c) from the past |
| 739 ldrb r12,[r1,#2] |
| 740 ldrb r0,[r1,#1] |
| 741 orr r2,r2,r12,lsl#8 |
| 742 ldrb r12,[r1],#4 |
| 743 orr r2,r2,r0,lsl#16 |
| 744 # if 12==15 |
| 745 str r1,[sp,#17*4] @ make room for r1 |
| 746 # endif |
| 747 eor r0,r4,r4,ror#5 |
| 748 orr r2,r2,r12,lsl#24 |
| 749 eor r0,r0,r4,ror#19 @ Sigma1(e) |
| 750 #endif |
| 751 ldr r12,[r14],#4 @ *K256++ |
| 752 add r7,r7,r2 @ h+=X[i] |
| 753 str r2,[sp,#12*4] |
| 754 eor r2,r5,r6 |
| 755 add r7,r7,r0,ror#6 @ h+=Sigma1(e) |
| 756 and r2,r2,r4 |
| 757 add r7,r7,r12 @ h+=K256[i] |
| 758 eor r2,r2,r6 @ Ch(e,f,g) |
| 759 eor r0,r8,r8,ror#11 |
| 760 add r7,r7,r2 @ h+=Ch(e,f,g) |
| 761 #if 12==31 |
| 762 and r12,r12,#0xff |
| 763 cmp r12,#0xf2 @ done? |
| 764 #endif |
| 765 #if 12<15 |
| 766 # if __ARM_ARCH__>=7 |
| 767 ldr r2,[r1],#4 @ prefetch |
| 768 # else |
| 769 ldrb r2,[r1,#3] |
| 770 # endif |
| 771 eor r12,r8,r9 @ a^b, b^c in next round |
| 772 #else |
| 773 ldr r2,[sp,#14*4] @ from future BODY_16_xx |
| 774 eor r12,r8,r9 @ a^b, b^c in next round |
| 775 ldr r1,[sp,#11*4] @ from future BODY_16_xx |
| 776 #endif |
| 777 eor r0,r0,r8,ror#20 @ Sigma0(a) |
| 778 and r3,r3,r12 @ (b^c)&=(a^b) |
| 779 add r11,r11,r7 @ d+=h |
| 780 eor r3,r3,r9 @ Maj(a,b,c) |
| 781 add r7,r7,r0,ror#2 @ h+=Sigma0(a) |
| 782 @ add r7,r7,r3 @ h+=Maj(a,b,c) |
| 783 #if __ARM_ARCH__>=7 |
| 784 @ ldr r2,[r1],#4 @ 13 |
| 785 # if 13==15 |
| 786 str r1,[sp,#17*4] @ make room for r1 |
| 787 # endif |
| 788 eor r0,r11,r11,ror#5 |
| 789 add r7,r7,r3 @ h+=Maj(a,b,c) from the past |
| 790 eor r0,r0,r11,ror#19 @ Sigma1(e) |
| 791 rev r2,r2 |
| 792 #else |
| 793 @ ldrb r2,[r1,#3] @ 13 |
| 794 add r7,r7,r3 @ h+=Maj(a,b,c) from the past |
| 795 ldrb r3,[r1,#2] |
| 796 ldrb r0,[r1,#1] |
| 797 orr r2,r2,r3,lsl#8 |
| 798 ldrb r3,[r1],#4 |
| 799 orr r2,r2,r0,lsl#16 |
| 800 # if 13==15 |
| 801 str r1,[sp,#17*4] @ make room for r1 |
| 802 # endif |
| 803 eor r0,r11,r11,ror#5 |
| 804 orr r2,r2,r3,lsl#24 |
| 805 eor r0,r0,r11,ror#19 @ Sigma1(e) |
| 806 #endif |
| 807 ldr r3,[r14],#4 @ *K256++ |
| 808 add r6,r6,r2 @ h+=X[i] |
| 809 str r2,[sp,#13*4] |
| 810 eor r2,r4,r5 |
| 811 add r6,r6,r0,ror#6 @ h+=Sigma1(e) |
| 812 and r2,r2,r11 |
| 813 add r6,r6,r3 @ h+=K256[i] |
| 814 eor r2,r2,r5 @ Ch(e,f,g) |
| 815 eor r0,r7,r7,ror#11 |
| 816 add r6,r6,r2 @ h+=Ch(e,f,g) |
| 817 #if 13==31 |
| 818 and r3,r3,#0xff |
| 819 cmp r3,#0xf2 @ done? |
| 820 #endif |
| 821 #if 13<15 |
| 822 # if __ARM_ARCH__>=7 |
| 823 ldr r2,[r1],#4 @ prefetch |
| 824 # else |
| 825 ldrb r2,[r1,#3] |
| 826 # endif |
| 827 eor r3,r7,r8 @ a^b, b^c in next round |
| 828 #else |
| 829 ldr r2,[sp,#15*4] @ from future BODY_16_xx |
| 830 eor r3,r7,r8 @ a^b, b^c in next round |
| 831 ldr r1,[sp,#12*4] @ from future BODY_16_xx |
| 832 #endif |
| 833 eor r0,r0,r7,ror#20 @ Sigma0(a) |
| 834 and r12,r12,r3 @ (b^c)&=(a^b) |
| 835 add r10,r10,r6 @ d+=h |
| 836 eor r12,r12,r8 @ Maj(a,b,c) |
| 837 add r6,r6,r0,ror#2 @ h+=Sigma0(a) |
| 838 @ add r6,r6,r12 @ h+=Maj(a,b,c) |
| 839 #if __ARM_ARCH__>=7 |
| 840 @ ldr r2,[r1],#4 @ 14 |
| 841 # if 14==15 |
| 842 str r1,[sp,#17*4] @ make room for r1 |
| 843 # endif |
| 844 eor r0,r10,r10,ror#5 |
| 845 add r6,r6,r12 @ h+=Maj(a,b,c) from the past |
| 846 eor r0,r0,r10,ror#19 @ Sigma1(e) |
| 847 rev r2,r2 |
| 848 #else |
| 849 @ ldrb r2,[r1,#3] @ 14 |
| 850 add r6,r6,r12 @ h+=Maj(a,b,c) from the past |
| 851 ldrb r12,[r1,#2] |
| 852 ldrb r0,[r1,#1] |
| 853 orr r2,r2,r12,lsl#8 |
| 854 ldrb r12,[r1],#4 |
| 855 orr r2,r2,r0,lsl#16 |
| 856 # if 14==15 |
| 857 str r1,[sp,#17*4] @ make room for r1 |
| 858 # endif |
| 859 eor r0,r10,r10,ror#5 |
| 860 orr r2,r2,r12,lsl#24 |
| 861 eor r0,r0,r10,ror#19 @ Sigma1(e) |
| 862 #endif |
| 863 ldr r12,[r14],#4 @ *K256++ |
| 864 add r5,r5,r2 @ h+=X[i] |
| 865 str r2,[sp,#14*4] |
| 866 eor r2,r11,r4 |
| 867 add r5,r5,r0,ror#6 @ h+=Sigma1(e) |
| 868 and r2,r2,r10 |
| 869 add r5,r5,r12 @ h+=K256[i] |
| 870 eor r2,r2,r4 @ Ch(e,f,g) |
| 871 eor r0,r6,r6,ror#11 |
| 872 add r5,r5,r2 @ h+=Ch(e,f,g) |
| 873 #if 14==31 |
| 874 and r12,r12,#0xff |
| 875 cmp r12,#0xf2 @ done? |
| 876 #endif |
| 877 #if 14<15 |
| 878 # if __ARM_ARCH__>=7 |
| 879 ldr r2,[r1],#4 @ prefetch |
| 880 # else |
| 881 ldrb r2,[r1,#3] |
| 882 # endif |
| 883 eor r12,r6,r7 @ a^b, b^c in next round |
| 884 #else |
| 885 ldr r2,[sp,#0*4] @ from future BODY_16_xx |
| 886 eor r12,r6,r7 @ a^b, b^c in next round |
| 887 ldr r1,[sp,#13*4] @ from future BODY_16_xx |
| 888 #endif |
| 889 eor r0,r0,r6,ror#20 @ Sigma0(a) |
| 890 and r3,r3,r12 @ (b^c)&=(a^b) |
| 891 add r9,r9,r5 @ d+=h |
| 892 eor r3,r3,r7 @ Maj(a,b,c) |
| 893 add r5,r5,r0,ror#2 @ h+=Sigma0(a) |
| 894 @ add r5,r5,r3 @ h+=Maj(a,b,c) |
| 895 #if __ARM_ARCH__>=7 |
| 896 @ ldr r2,[r1],#4 @ 15 |
| 897 # if 15==15 |
| 898 str r1,[sp,#17*4] @ make room for r1 |
| 899 # endif |
| 900 eor r0,r9,r9,ror#5 |
| 901 add r5,r5,r3 @ h+=Maj(a,b,c) from the past |
| 902 eor r0,r0,r9,ror#19 @ Sigma1(e) |
| 903 rev r2,r2 |
| 904 #else |
| 905 @ ldrb r2,[r1,#3] @ 15 |
| 906 add r5,r5,r3 @ h+=Maj(a,b,c) from the past |
| 907 ldrb r3,[r1,#2] |
| 908 ldrb r0,[r1,#1] |
| 909 orr r2,r2,r3,lsl#8 |
| 910 ldrb r3,[r1],#4 |
| 911 orr r2,r2,r0,lsl#16 |
| 912 # if 15==15 |
| 913 str r1,[sp,#17*4] @ make room for r1 |
| 914 # endif |
| 915 eor r0,r9,r9,ror#5 |
| 916 orr r2,r2,r3,lsl#24 |
| 917 eor r0,r0,r9,ror#19 @ Sigma1(e) |
| 918 #endif |
| 919 ldr r3,[r14],#4 @ *K256++ |
| 920 add r4,r4,r2 @ h+=X[i] |
| 921 str r2,[sp,#15*4] |
| 922 eor r2,r10,r11 |
| 923 add r4,r4,r0,ror#6 @ h+=Sigma1(e) |
| 924 and r2,r2,r9 |
| 925 add r4,r4,r3 @ h+=K256[i] |
| 926 eor r2,r2,r11 @ Ch(e,f,g) |
| 927 eor r0,r5,r5,ror#11 |
| 928 add r4,r4,r2 @ h+=Ch(e,f,g) |
| 929 #if 15==31 |
| 930 and r3,r3,#0xff |
| 931 cmp r3,#0xf2 @ done? |
| 932 #endif |
| 933 #if 15<15 |
| 934 # if __ARM_ARCH__>=7 |
| 935 ldr r2,[r1],#4 @ prefetch |
| 936 # else |
| 937 ldrb r2,[r1,#3] |
| 938 # endif |
| 939 eor r3,r5,r6 @ a^b, b^c in next round |
| 940 #else |
| 941 ldr r2,[sp,#1*4] @ from future BODY_16_xx |
| 942 eor r3,r5,r6 @ a^b, b^c in next round |
| 943 ldr r1,[sp,#14*4] @ from future BODY_16_xx |
| 944 #endif |
| 945 eor r0,r0,r5,ror#20 @ Sigma0(a) |
| 946 and r12,r12,r3 @ (b^c)&=(a^b) |
| 947 add r8,r8,r4 @ d+=h |
| 948 eor r12,r12,r6 @ Maj(a,b,c) |
| 949 add r4,r4,r0,ror#2 @ h+=Sigma0(a) |
| 950 @ add r4,r4,r12 @ h+=Maj(a,b,c) |
| 951 .Lrounds_16_xx: |
| 952 @ ldr r2,[sp,#1*4] @ 16 |
| 953 @ ldr r1,[sp,#14*4] |
| 954 mov r0,r2,ror#7 |
| 955 add r4,r4,r12 @ h+=Maj(a,b,c) from the past |
| 956 mov r12,r1,ror#17 |
| 957 eor r0,r0,r2,ror#18 |
| 958 eor r12,r12,r1,ror#19 |
| 959 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) |
| 960 ldr r2,[sp,#0*4] |
| 961 eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) |
| 962 ldr r1,[sp,#9*4] |
| 963 |
| 964 add r12,r12,r0 |
| 965 eor r0,r8,r8,ror#5 @ from BODY_00_15 |
| 966 add r2,r2,r12 |
| 967 eor r0,r0,r8,ror#19 @ Sigma1(e) |
| 968 add r2,r2,r1 @ X[i] |
| 969 ldr r12,[r14],#4 @ *K256++ |
| 970 add r11,r11,r2 @ h+=X[i] |
| 971 str r2,[sp,#0*4] |
| 972 eor r2,r9,r10 |
| 973 add r11,r11,r0,ror#6 @ h+=Sigma1(e) |
| 974 and r2,r2,r8 |
| 975 add r11,r11,r12 @ h+=K256[i] |
| 976 eor r2,r2,r10 @ Ch(e,f,g) |
| 977 eor r0,r4,r4,ror#11 |
| 978 add r11,r11,r2 @ h+=Ch(e,f,g) |
| 979 #if 16==31 |
| 980 and r12,r12,#0xff |
| 981 cmp r12,#0xf2 @ done? |
| 982 #endif |
| 983 #if 16<15 |
| 984 # if __ARM_ARCH__>=7 |
| 985 ldr r2,[r1],#4 @ prefetch |
| 986 # else |
| 987 ldrb r2,[r1,#3] |
| 988 # endif |
| 989 eor r12,r4,r5 @ a^b, b^c in next round |
| 990 #else |
| 991 ldr r2,[sp,#2*4] @ from future BODY_16_xx |
| 992 eor r12,r4,r5 @ a^b, b^c in next round |
| 993 ldr r1,[sp,#15*4] @ from future BODY_16_xx |
| 994 #endif |
| 995 eor r0,r0,r4,ror#20 @ Sigma0(a) |
| 996 and r3,r3,r12 @ (b^c)&=(a^b) |
| 997 add r7,r7,r11 @ d+=h |
| 998 eor r3,r3,r5 @ Maj(a,b,c) |
| 999 add r11,r11,r0,ror#2 @ h+=Sigma0(a) |
| 1000 @ add r11,r11,r3 @ h+=Maj(a,b,c) |
| 1001 @ ldr r2,[sp,#2*4] @ 17 |
| 1002 @ ldr r1,[sp,#15*4] |
| 1003 mov r0,r2,ror#7 |
| 1004 add r11,r11,r3 @ h+=Maj(a,b,c) from the past |
| 1005 mov r3,r1,ror#17 |
| 1006 eor r0,r0,r2,ror#18 |
| 1007 eor r3,r3,r1,ror#19 |
| 1008 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) |
| 1009 ldr r2,[sp,#1*4] |
| 1010 eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) |
| 1011 ldr r1,[sp,#10*4] |
| 1012 |
| 1013 add r3,r3,r0 |
| 1014 eor r0,r7,r7,ror#5 @ from BODY_00_15 |
| 1015 add r2,r2,r3 |
| 1016 eor r0,r0,r7,ror#19 @ Sigma1(e) |
| 1017 add r2,r2,r1 @ X[i] |
| 1018 ldr r3,[r14],#4 @ *K256++ |
| 1019 add r10,r10,r2 @ h+=X[i] |
| 1020 str r2,[sp,#1*4] |
| 1021 eor r2,r8,r9 |
| 1022 add r10,r10,r0,ror#6 @ h+=Sigma1(e) |
| 1023 and r2,r2,r7 |
| 1024 add r10,r10,r3 @ h+=K256[i] |
| 1025 eor r2,r2,r9 @ Ch(e,f,g) |
| 1026 eor r0,r11,r11,ror#11 |
| 1027 add r10,r10,r2 @ h+=Ch(e,f,g) |
| 1028 #if 17==31 |
| 1029 and r3,r3,#0xff |
| 1030 cmp r3,#0xf2 @ done? |
| 1031 #endif |
| 1032 #if 17<15 |
| 1033 # if __ARM_ARCH__>=7 |
| 1034 ldr r2,[r1],#4 @ prefetch |
| 1035 # else |
| 1036 ldrb r2,[r1,#3] |
| 1037 # endif |
| 1038 eor r3,r11,r4 @ a^b, b^c in next round |
| 1039 #else |
| 1040 ldr r2,[sp,#3*4] @ from future BODY_16_xx |
| 1041 eor r3,r11,r4 @ a^b, b^c in next round |
| 1042 ldr r1,[sp,#0*4] @ from future BODY_16_xx |
| 1043 #endif |
| 1044 eor r0,r0,r11,ror#20 @ Sigma0(a) |
| 1045 and r12,r12,r3 @ (b^c)&=(a^b) |
| 1046 add r6,r6,r10 @ d+=h |
| 1047 eor r12,r12,r4 @ Maj(a,b,c) |
| 1048 add r10,r10,r0,ror#2 @ h+=Sigma0(a) |
| 1049 @ add r10,r10,r12 @ h+=Maj(a,b,c) |
| 1050 @ ldr r2,[sp,#3*4] @ 18 |
| 1051 @ ldr r1,[sp,#0*4] |
| 1052 mov r0,r2,ror#7 |
| 1053 add r10,r10,r12 @ h+=Maj(a,b,c) from the past |
| 1054 mov r12,r1,ror#17 |
| 1055 eor r0,r0,r2,ror#18 |
| 1056 eor r12,r12,r1,ror#19 |
| 1057 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) |
| 1058 ldr r2,[sp,#2*4] |
| 1059 eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) |
| 1060 ldr r1,[sp,#11*4] |
| 1061 |
| 1062 add r12,r12,r0 |
| 1063 eor r0,r6,r6,ror#5 @ from BODY_00_15 |
| 1064 add r2,r2,r12 |
| 1065 eor r0,r0,r6,ror#19 @ Sigma1(e) |
| 1066 add r2,r2,r1 @ X[i] |
| 1067 ldr r12,[r14],#4 @ *K256++ |
| 1068 add r9,r9,r2 @ h+=X[i] |
| 1069 str r2,[sp,#2*4] |
| 1070 eor r2,r7,r8 |
| 1071 add r9,r9,r0,ror#6 @ h+=Sigma1(e) |
| 1072 and r2,r2,r6 |
| 1073 add r9,r9,r12 @ h+=K256[i] |
| 1074 eor r2,r2,r8 @ Ch(e,f,g) |
| 1075 eor r0,r10,r10,ror#11 |
| 1076 add r9,r9,r2 @ h+=Ch(e,f,g) |
| 1077 #if 18==31 |
| 1078 and r12,r12,#0xff |
| 1079 cmp r12,#0xf2 @ done? |
| 1080 #endif |
| 1081 #if 18<15 |
| 1082 # if __ARM_ARCH__>=7 |
| 1083 ldr r2,[r1],#4 @ prefetch |
| 1084 # else |
| 1085 ldrb r2,[r1,#3] |
| 1086 # endif |
| 1087 eor r12,r10,r11 @ a^b, b^c in next round |
| 1088 #else |
| 1089 ldr r2,[sp,#4*4] @ from future BODY_16_xx |
| 1090 eor r12,r10,r11 @ a^b, b^c in next round |
| 1091 ldr r1,[sp,#1*4] @ from future BODY_16_xx |
| 1092 #endif |
| 1093 eor r0,r0,r10,ror#20 @ Sigma0(a) |
| 1094 and r3,r3,r12 @ (b^c)&=(a^b) |
| 1095 add r5,r5,r9 @ d+=h |
| 1096 eor r3,r3,r11 @ Maj(a,b,c) |
| 1097 add r9,r9,r0,ror#2 @ h+=Sigma0(a) |
| 1098 @ add r9,r9,r3 @ h+=Maj(a,b,c) |
| 1099 @ ldr r2,[sp,#4*4] @ 19 |
| 1100 @ ldr r1,[sp,#1*4] |
| 1101 mov r0,r2,ror#7 |
| 1102 add r9,r9,r3 @ h+=Maj(a,b,c) from the past |
| 1103 mov r3,r1,ror#17 |
| 1104 eor r0,r0,r2,ror#18 |
| 1105 eor r3,r3,r1,ror#19 |
| 1106 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) |
| 1107 ldr r2,[sp,#3*4] |
| 1108 eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) |
| 1109 ldr r1,[sp,#12*4] |
| 1110 |
| 1111 add r3,r3,r0 |
| 1112 eor r0,r5,r5,ror#5 @ from BODY_00_15 |
| 1113 add r2,r2,r3 |
| 1114 eor r0,r0,r5,ror#19 @ Sigma1(e) |
| 1115 add r2,r2,r1 @ X[i] |
| 1116 ldr r3,[r14],#4 @ *K256++ |
| 1117 add r8,r8,r2 @ h+=X[i] |
| 1118 str r2,[sp,#3*4] |
| 1119 eor r2,r6,r7 |
| 1120 add r8,r8,r0,ror#6 @ h+=Sigma1(e) |
| 1121 and r2,r2,r5 |
| 1122 add r8,r8,r3 @ h+=K256[i] |
| 1123 eor r2,r2,r7 @ Ch(e,f,g) |
| 1124 eor r0,r9,r9,ror#11 |
| 1125 add r8,r8,r2 @ h+=Ch(e,f,g) |
| 1126 #if 19==31 |
| 1127 and r3,r3,#0xff |
| 1128 cmp r3,#0xf2 @ done? |
| 1129 #endif |
| 1130 #if 19<15 |
| 1131 # if __ARM_ARCH__>=7 |
| 1132 ldr r2,[r1],#4 @ prefetch |
| 1133 # else |
| 1134 ldrb r2,[r1,#3] |
| 1135 # endif |
| 1136 eor r3,r9,r10 @ a^b, b^c in next round |
| 1137 #else |
| 1138 ldr r2,[sp,#5*4] @ from future BODY_16_xx |
| 1139 eor r3,r9,r10 @ a^b, b^c in next round |
| 1140 ldr r1,[sp,#2*4] @ from future BODY_16_xx |
| 1141 #endif |
| 1142 eor r0,r0,r9,ror#20 @ Sigma0(a) |
| 1143 and r12,r12,r3 @ (b^c)&=(a^b) |
| 1144 add r4,r4,r8 @ d+=h |
| 1145 eor r12,r12,r10 @ Maj(a,b,c) |
| 1146 add r8,r8,r0,ror#2 @ h+=Sigma0(a) |
| 1147 @ add r8,r8,r12 @ h+=Maj(a,b,c) |
| 1148 @ ldr r2,[sp,#5*4] @ 20 |
| 1149 @ ldr r1,[sp,#2*4] |
| 1150 mov r0,r2,ror#7 |
| 1151 add r8,r8,r12 @ h+=Maj(a,b,c) from the past |
| 1152 mov r12,r1,ror#17 |
| 1153 eor r0,r0,r2,ror#18 |
| 1154 eor r12,r12,r1,ror#19 |
| 1155 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) |
| 1156 ldr r2,[sp,#4*4] |
| 1157 eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) |
| 1158 ldr r1,[sp,#13*4] |
| 1159 |
| 1160 add r12,r12,r0 |
| 1161 eor r0,r4,r4,ror#5 @ from BODY_00_15 |
| 1162 add r2,r2,r12 |
| 1163 eor r0,r0,r4,ror#19 @ Sigma1(e) |
| 1164 add r2,r2,r1 @ X[i] |
| 1165 ldr r12,[r14],#4 @ *K256++ |
| 1166 add r7,r7,r2 @ h+=X[i] |
| 1167 str r2,[sp,#4*4] |
| 1168 eor r2,r5,r6 |
| 1169 add r7,r7,r0,ror#6 @ h+=Sigma1(e) |
| 1170 and r2,r2,r4 |
| 1171 add r7,r7,r12 @ h+=K256[i] |
| 1172 eor r2,r2,r6 @ Ch(e,f,g) |
| 1173 eor r0,r8,r8,ror#11 |
| 1174 add r7,r7,r2 @ h+=Ch(e,f,g) |
| 1175 #if 20==31 |
| 1176 and r12,r12,#0xff |
| 1177 cmp r12,#0xf2 @ done? |
| 1178 #endif |
| 1179 #if 20<15 |
| 1180 # if __ARM_ARCH__>=7 |
| 1181 ldr r2,[r1],#4 @ prefetch |
| 1182 # else |
| 1183 ldrb r2,[r1,#3] |
| 1184 # endif |
| 1185 eor r12,r8,r9 @ a^b, b^c in next round |
| 1186 #else |
| 1187 ldr r2,[sp,#6*4] @ from future BODY_16_xx |
| 1188 eor r12,r8,r9 @ a^b, b^c in next round |
| 1189 ldr r1,[sp,#3*4] @ from future BODY_16_xx |
| 1190 #endif |
| 1191 eor r0,r0,r8,ror#20 @ Sigma0(a) |
| 1192 and r3,r3,r12 @ (b^c)&=(a^b) |
| 1193 add r11,r11,r7 @ d+=h |
| 1194 eor r3,r3,r9 @ Maj(a,b,c) |
| 1195 add r7,r7,r0,ror#2 @ h+=Sigma0(a) |
| 1196 @ add r7,r7,r3 @ h+=Maj(a,b,c) |
| 1197 @ ldr r2,[sp,#6*4] @ 21 |
| 1198 @ ldr r1,[sp,#3*4] |
| 1199 mov r0,r2,ror#7 |
| 1200 add r7,r7,r3 @ h+=Maj(a,b,c) from the past |
| 1201 mov r3,r1,ror#17 |
| 1202 eor r0,r0,r2,ror#18 |
| 1203 eor r3,r3,r1,ror#19 |
| 1204 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) |
| 1205 ldr r2,[sp,#5*4] |
| 1206 eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) |
| 1207 ldr r1,[sp,#14*4] |
| 1208 |
| 1209 add r3,r3,r0 |
| 1210 eor r0,r11,r11,ror#5 @ from BODY_00_15 |
| 1211 add r2,r2,r3 |
| 1212 eor r0,r0,r11,ror#19 @ Sigma1(e) |
| 1213 add r2,r2,r1 @ X[i] |
| 1214 ldr r3,[r14],#4 @ *K256++ |
| 1215 add r6,r6,r2 @ h+=X[i] |
| 1216 str r2,[sp,#5*4] |
| 1217 eor r2,r4,r5 |
| 1218 add r6,r6,r0,ror#6 @ h+=Sigma1(e) |
| 1219 and r2,r2,r11 |
| 1220 add r6,r6,r3 @ h+=K256[i] |
| 1221 eor r2,r2,r5 @ Ch(e,f,g) |
| 1222 eor r0,r7,r7,ror#11 |
| 1223 add r6,r6,r2 @ h+=Ch(e,f,g) |
| 1224 #if 21==31 |
| 1225 and r3,r3,#0xff |
| 1226 cmp r3,#0xf2 @ done? |
| 1227 #endif |
| 1228 #if 21<15 |
| 1229 # if __ARM_ARCH__>=7 |
| 1230 ldr r2,[r1],#4 @ prefetch |
| 1231 # else |
| 1232 ldrb r2,[r1,#3] |
| 1233 # endif |
| 1234 eor r3,r7,r8 @ a^b, b^c in next round |
| 1235 #else |
| 1236 ldr r2,[sp,#7*4] @ from future BODY_16_xx |
| 1237 eor r3,r7,r8 @ a^b, b^c in next round |
| 1238 ldr r1,[sp,#4*4] @ from future BODY_16_xx |
| 1239 #endif |
| 1240 eor r0,r0,r7,ror#20 @ Sigma0(a) |
| 1241 and r12,r12,r3 @ (b^c)&=(a^b) |
| 1242 add r10,r10,r6 @ d+=h |
| 1243 eor r12,r12,r8 @ Maj(a,b,c) |
| 1244 add r6,r6,r0,ror#2 @ h+=Sigma0(a) |
| 1245 @ add r6,r6,r12 @ h+=Maj(a,b,c) |
| 1246 @ ldr r2,[sp,#7*4] @ 22 |
| 1247 @ ldr r1,[sp,#4*4] |
| 1248 mov r0,r2,ror#7 |
| 1249 add r6,r6,r12 @ h+=Maj(a,b,c) from the past |
| 1250 mov r12,r1,ror#17 |
| 1251 eor r0,r0,r2,ror#18 |
| 1252 eor r12,r12,r1,ror#19 |
| 1253 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) |
| 1254 ldr r2,[sp,#6*4] |
| 1255 eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) |
| 1256 ldr r1,[sp,#15*4] |
| 1257 |
| 1258 add r12,r12,r0 |
| 1259 eor r0,r10,r10,ror#5 @ from BODY_00_15 |
| 1260 add r2,r2,r12 |
| 1261 eor r0,r0,r10,ror#19 @ Sigma1(e) |
| 1262 add r2,r2,r1 @ X[i] |
| 1263 ldr r12,[r14],#4 @ *K256++ |
| 1264 add r5,r5,r2 @ h+=X[i] |
| 1265 str r2,[sp,#6*4] |
| 1266 eor r2,r11,r4 |
| 1267 add r5,r5,r0,ror#6 @ h+=Sigma1(e) |
| 1268 and r2,r2,r10 |
| 1269 add r5,r5,r12 @ h+=K256[i] |
| 1270 eor r2,r2,r4 @ Ch(e,f,g) |
| 1271 eor r0,r6,r6,ror#11 |
| 1272 add r5,r5,r2 @ h+=Ch(e,f,g) |
| 1273 #if 22==31 |
| 1274 and r12,r12,#0xff |
| 1275 cmp r12,#0xf2 @ done? |
| 1276 #endif |
| 1277 #if 22<15 |
| 1278 # if __ARM_ARCH__>=7 |
| 1279 ldr r2,[r1],#4 @ prefetch |
| 1280 # else |
| 1281 ldrb r2,[r1,#3] |
| 1282 # endif |
| 1283 eor r12,r6,r7 @ a^b, b^c in next round |
| 1284 #else |
| 1285 ldr r2,[sp,#8*4] @ from future BODY_16_xx |
| 1286 eor r12,r6,r7 @ a^b, b^c in next round |
| 1287 ldr r1,[sp,#5*4] @ from future BODY_16_xx |
| 1288 #endif |
| 1289 eor r0,r0,r6,ror#20 @ Sigma0(a) |
| 1290 and r3,r3,r12 @ (b^c)&=(a^b) |
| 1291 add r9,r9,r5 @ d+=h |
| 1292 eor r3,r3,r7 @ Maj(a,b,c) |
| 1293 add r5,r5,r0,ror#2 @ h+=Sigma0(a) |
| 1294 @ add r5,r5,r3 @ h+=Maj(a,b,c) |
| 1295 @ ldr r2,[sp,#8*4] @ 23 |
| 1296 @ ldr r1,[sp,#5*4] |
| 1297 mov r0,r2,ror#7 |
| 1298 add r5,r5,r3 @ h+=Maj(a,b,c) from the past |
| 1299 mov r3,r1,ror#17 |
| 1300 eor r0,r0,r2,ror#18 |
| 1301 eor r3,r3,r1,ror#19 |
| 1302 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) |
| 1303 ldr r2,[sp,#7*4] |
| 1304 eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) |
| 1305 ldr r1,[sp,#0*4] |
| 1306 |
| 1307 add r3,r3,r0 |
| 1308 eor r0,r9,r9,ror#5 @ from BODY_00_15 |
| 1309 add r2,r2,r3 |
| 1310 eor r0,r0,r9,ror#19 @ Sigma1(e) |
| 1311 add r2,r2,r1 @ X[i] |
| 1312 ldr r3,[r14],#4 @ *K256++ |
| 1313 add r4,r4,r2 @ h+=X[i] |
| 1314 str r2,[sp,#7*4] |
| 1315 eor r2,r10,r11 |
| 1316 add r4,r4,r0,ror#6 @ h+=Sigma1(e) |
| 1317 and r2,r2,r9 |
| 1318 add r4,r4,r3 @ h+=K256[i] |
| 1319 eor r2,r2,r11 @ Ch(e,f,g) |
| 1320 eor r0,r5,r5,ror#11 |
| 1321 add r4,r4,r2 @ h+=Ch(e,f,g) |
| 1322 #if 23==31 |
| 1323 and r3,r3,#0xff |
| 1324 cmp r3,#0xf2 @ done? |
| 1325 #endif |
| 1326 #if 23<15 |
| 1327 # if __ARM_ARCH__>=7 |
| 1328 ldr r2,[r1],#4 @ prefetch |
| 1329 # else |
| 1330 ldrb r2,[r1,#3] |
| 1331 # endif |
| 1332 eor r3,r5,r6 @ a^b, b^c in next round |
| 1333 #else |
| 1334 ldr r2,[sp,#9*4] @ from future BODY_16_xx |
| 1335 eor r3,r5,r6 @ a^b, b^c in next round |
| 1336 ldr r1,[sp,#6*4] @ from future BODY_16_xx |
| 1337 #endif |
| 1338 eor r0,r0,r5,ror#20 @ Sigma0(a) |
| 1339 and r12,r12,r3 @ (b^c)&=(a^b) |
| 1340 add r8,r8,r4 @ d+=h |
| 1341 eor r12,r12,r6 @ Maj(a,b,c) |
| 1342 add r4,r4,r0,ror#2 @ h+=Sigma0(a) |
| 1343 @ add r4,r4,r12 @ h+=Maj(a,b,c) |
| 1344 @ ldr r2,[sp,#9*4] @ 24 |
| 1345 @ ldr r1,[sp,#6*4] |
| 1346 mov r0,r2,ror#7 |
| 1347 add r4,r4,r12 @ h+=Maj(a,b,c) from the past |
| 1348 mov r12,r1,ror#17 |
| 1349 eor r0,r0,r2,ror#18 |
| 1350 eor r12,r12,r1,ror#19 |
| 1351 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) |
| 1352 ldr r2,[sp,#8*4] |
| 1353 eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) |
| 1354 ldr r1,[sp,#1*4] |
| 1355 |
| 1356 add r12,r12,r0 |
| 1357 eor r0,r8,r8,ror#5 @ from BODY_00_15 |
| 1358 add r2,r2,r12 |
| 1359 eor r0,r0,r8,ror#19 @ Sigma1(e) |
| 1360 add r2,r2,r1 @ X[i] |
| 1361 ldr r12,[r14],#4 @ *K256++ |
| 1362 add r11,r11,r2 @ h+=X[i] |
| 1363 str r2,[sp,#8*4] |
| 1364 eor r2,r9,r10 |
| 1365 add r11,r11,r0,ror#6 @ h+=Sigma1(e) |
| 1366 and r2,r2,r8 |
| 1367 add r11,r11,r12 @ h+=K256[i] |
| 1368 eor r2,r2,r10 @ Ch(e,f,g) |
| 1369 eor r0,r4,r4,ror#11 |
| 1370 add r11,r11,r2 @ h+=Ch(e,f,g) |
| 1371 #if 24==31 |
| 1372 and r12,r12,#0xff |
| 1373 cmp r12,#0xf2 @ done? |
| 1374 #endif |
| 1375 #if 24<15 |
| 1376 # if __ARM_ARCH__>=7 |
| 1377 ldr r2,[r1],#4 @ prefetch |
| 1378 # else |
| 1379 ldrb r2,[r1,#3] |
| 1380 # endif |
| 1381 eor r12,r4,r5 @ a^b, b^c in next round |
| 1382 #else |
| 1383 ldr r2,[sp,#10*4] @ from future BODY_16_xx |
| 1384 eor r12,r4,r5 @ a^b, b^c in next round |
| 1385 ldr r1,[sp,#7*4] @ from future BODY_16_xx |
| 1386 #endif |
| 1387 eor r0,r0,r4,ror#20 @ Sigma0(a) |
| 1388 and r3,r3,r12 @ (b^c)&=(a^b) |
| 1389 add r7,r7,r11 @ d+=h |
| 1390 eor r3,r3,r5 @ Maj(a,b,c) |
| 1391 add r11,r11,r0,ror#2 @ h+=Sigma0(a) |
| 1392 @ add r11,r11,r3 @ h+=Maj(a,b,c) |
| 1393 @ ldr r2,[sp,#10*4] @ 25 |
| 1394 @ ldr r1,[sp,#7*4] |
| 1395 mov r0,r2,ror#7 |
| 1396 add r11,r11,r3 @ h+=Maj(a,b,c) from the past |
| 1397 mov r3,r1,ror#17 |
| 1398 eor r0,r0,r2,ror#18 |
| 1399 eor r3,r3,r1,ror#19 |
| 1400 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) |
| 1401 ldr r2,[sp,#9*4] |
| 1402 eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) |
| 1403 ldr r1,[sp,#2*4] |
| 1404 |
| 1405 add r3,r3,r0 |
| 1406 eor r0,r7,r7,ror#5 @ from BODY_00_15 |
| 1407 add r2,r2,r3 |
| 1408 eor r0,r0,r7,ror#19 @ Sigma1(e) |
| 1409 add r2,r2,r1 @ X[i] |
| 1410 ldr r3,[r14],#4 @ *K256++ |
| 1411 add r10,r10,r2 @ h+=X[i] |
| 1412 str r2,[sp,#9*4] |
| 1413 eor r2,r8,r9 |
| 1414 add r10,r10,r0,ror#6 @ h+=Sigma1(e) |
| 1415 and r2,r2,r7 |
| 1416 add r10,r10,r3 @ h+=K256[i] |
| 1417 eor r2,r2,r9 @ Ch(e,f,g) |
| 1418 eor r0,r11,r11,ror#11 |
| 1419 add r10,r10,r2 @ h+=Ch(e,f,g) |
| 1420 #if 25==31 |
| 1421 and r3,r3,#0xff |
| 1422 cmp r3,#0xf2 @ done? |
| 1423 #endif |
| 1424 #if 25<15 |
| 1425 # if __ARM_ARCH__>=7 |
| 1426 ldr r2,[r1],#4 @ prefetch |
| 1427 # else |
| 1428 ldrb r2,[r1,#3] |
| 1429 # endif |
| 1430 eor r3,r11,r4 @ a^b, b^c in next round |
| 1431 #else |
| 1432 ldr r2,[sp,#11*4] @ from future BODY_16_xx |
| 1433 eor r3,r11,r4 @ a^b, b^c in next round |
| 1434 ldr r1,[sp,#8*4] @ from future BODY_16_xx |
| 1435 #endif |
| 1436 eor r0,r0,r11,ror#20 @ Sigma0(a) |
| 1437 and r12,r12,r3 @ (b^c)&=(a^b) |
| 1438 add r6,r6,r10 @ d+=h |
| 1439 eor r12,r12,r4 @ Maj(a,b,c) |
| 1440 add r10,r10,r0,ror#2 @ h+=Sigma0(a) |
| 1441 @ add r10,r10,r12 @ h+=Maj(a,b,c) |
| 1442 @ ldr r2,[sp,#11*4] @ 26 |
| 1443 @ ldr r1,[sp,#8*4] |
| 1444 mov r0,r2,ror#7 |
| 1445 add r10,r10,r12 @ h+=Maj(a,b,c) from the past |
| 1446 mov r12,r1,ror#17 |
| 1447 eor r0,r0,r2,ror#18 |
| 1448 eor r12,r12,r1,ror#19 |
| 1449 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) |
| 1450 ldr r2,[sp,#10*4] |
| 1451 eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) |
| 1452 ldr r1,[sp,#3*4] |
| 1453 |
| 1454 add r12,r12,r0 |
| 1455 eor r0,r6,r6,ror#5 @ from BODY_00_15 |
| 1456 add r2,r2,r12 |
| 1457 eor r0,r0,r6,ror#19 @ Sigma1(e) |
| 1458 add r2,r2,r1 @ X[i] |
| 1459 ldr r12,[r14],#4 @ *K256++ |
| 1460 add r9,r9,r2 @ h+=X[i] |
| 1461 str r2,[sp,#10*4] |
| 1462 eor r2,r7,r8 |
| 1463 add r9,r9,r0,ror#6 @ h+=Sigma1(e) |
| 1464 and r2,r2,r6 |
| 1465 add r9,r9,r12 @ h+=K256[i] |
| 1466 eor r2,r2,r8 @ Ch(e,f,g) |
| 1467 eor r0,r10,r10,ror#11 |
| 1468 add r9,r9,r2 @ h+=Ch(e,f,g) |
| 1469 #if 26==31 |
| 1470 and r12,r12,#0xff |
| 1471 cmp r12,#0xf2 @ done? |
| 1472 #endif |
| 1473 #if 26<15 |
| 1474 # if __ARM_ARCH__>=7 |
| 1475 ldr r2,[r1],#4 @ prefetch |
| 1476 # else |
| 1477 ldrb r2,[r1,#3] |
| 1478 # endif |
| 1479 eor r12,r10,r11 @ a^b, b^c in next round |
| 1480 #else |
| 1481 ldr r2,[sp,#12*4] @ from future BODY_16_xx |
| 1482 eor r12,r10,r11 @ a^b, b^c in next round |
| 1483 ldr r1,[sp,#9*4] @ from future BODY_16_xx |
| 1484 #endif |
| 1485 eor r0,r0,r10,ror#20 @ Sigma0(a) |
| 1486 and r3,r3,r12 @ (b^c)&=(a^b) |
| 1487 add r5,r5,r9 @ d+=h |
| 1488 eor r3,r3,r11 @ Maj(a,b,c) |
| 1489 add r9,r9,r0,ror#2 @ h+=Sigma0(a) |
| 1490 @ add r9,r9,r3 @ h+=Maj(a,b,c) |
| 1491 @ ldr r2,[sp,#12*4] @ 27 |
| 1492 @ ldr r1,[sp,#9*4] |
| 1493 mov r0,r2,ror#7 |
| 1494 add r9,r9,r3 @ h+=Maj(a,b,c) from the past |
| 1495 mov r3,r1,ror#17 |
| 1496 eor r0,r0,r2,ror#18 |
| 1497 eor r3,r3,r1,ror#19 |
| 1498 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) |
| 1499 ldr r2,[sp,#11*4] |
| 1500 eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) |
| 1501 ldr r1,[sp,#4*4] |
| 1502 |
| 1503 add r3,r3,r0 |
| 1504 eor r0,r5,r5,ror#5 @ from BODY_00_15 |
| 1505 add r2,r2,r3 |
| 1506 eor r0,r0,r5,ror#19 @ Sigma1(e) |
| 1507 add r2,r2,r1 @ X[i] |
| 1508 ldr r3,[r14],#4 @ *K256++ |
| 1509 add r8,r8,r2 @ h+=X[i] |
| 1510 str r2,[sp,#11*4] |
| 1511 eor r2,r6,r7 |
| 1512 add r8,r8,r0,ror#6 @ h+=Sigma1(e) |
| 1513 and r2,r2,r5 |
| 1514 add r8,r8,r3 @ h+=K256[i] |
| 1515 eor r2,r2,r7 @ Ch(e,f,g) |
| 1516 eor r0,r9,r9,ror#11 |
| 1517 add r8,r8,r2 @ h+=Ch(e,f,g) |
| 1518 #if 27==31 |
| 1519 and r3,r3,#0xff |
| 1520 cmp r3,#0xf2 @ done? |
| 1521 #endif |
| 1522 #if 27<15 |
| 1523 # if __ARM_ARCH__>=7 |
| 1524 ldr r2,[r1],#4 @ prefetch |
| 1525 # else |
| 1526 ldrb r2,[r1,#3] |
| 1527 # endif |
| 1528 eor r3,r9,r10 @ a^b, b^c in next round |
| 1529 #else |
| 1530 ldr r2,[sp,#13*4] @ from future BODY_16_xx |
| 1531 eor r3,r9,r10 @ a^b, b^c in next round |
| 1532 ldr r1,[sp,#10*4] @ from future BODY_16_xx |
| 1533 #endif |
| 1534 eor r0,r0,r9,ror#20 @ Sigma0(a) |
| 1535 and r12,r12,r3 @ (b^c)&=(a^b) |
| 1536 add r4,r4,r8 @ d+=h |
| 1537 eor r12,r12,r10 @ Maj(a,b,c) |
| 1538 add r8,r8,r0,ror#2 @ h+=Sigma0(a) |
| 1539 @ add r8,r8,r12 @ h+=Maj(a,b,c) |
| 1540 @ ldr r2,[sp,#13*4] @ 28 |
| 1541 @ ldr r1,[sp,#10*4] |
| 1542 mov r0,r2,ror#7 |
| 1543 add r8,r8,r12 @ h+=Maj(a,b,c) from the past |
| 1544 mov r12,r1,ror#17 |
| 1545 eor r0,r0,r2,ror#18 |
| 1546 eor r12,r12,r1,ror#19 |
| 1547 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) |
| 1548 ldr r2,[sp,#12*4] |
| 1549 eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) |
| 1550 ldr r1,[sp,#5*4] |
| 1551 |
| 1552 add r12,r12,r0 |
| 1553 eor r0,r4,r4,ror#5 @ from BODY_00_15 |
| 1554 add r2,r2,r12 |
| 1555 eor r0,r0,r4,ror#19 @ Sigma1(e) |
| 1556 add r2,r2,r1 @ X[i] |
| 1557 ldr r12,[r14],#4 @ *K256++ |
| 1558 add r7,r7,r2 @ h+=X[i] |
| 1559 str r2,[sp,#12*4] |
| 1560 eor r2,r5,r6 |
| 1561 add r7,r7,r0,ror#6 @ h+=Sigma1(e) |
| 1562 and r2,r2,r4 |
| 1563 add r7,r7,r12 @ h+=K256[i] |
| 1564 eor r2,r2,r6 @ Ch(e,f,g) |
| 1565 eor r0,r8,r8,ror#11 |
| 1566 add r7,r7,r2 @ h+=Ch(e,f,g) |
| 1567 #if 28==31 |
| 1568 and r12,r12,#0xff |
| 1569 cmp r12,#0xf2 @ done? |
| 1570 #endif |
| 1571 #if 28<15 |
| 1572 # if __ARM_ARCH__>=7 |
| 1573 ldr r2,[r1],#4 @ prefetch |
| 1574 # else |
| 1575 ldrb r2,[r1,#3] |
| 1576 # endif |
| 1577 eor r12,r8,r9 @ a^b, b^c in next round |
| 1578 #else |
| 1579 ldr r2,[sp,#14*4] @ from future BODY_16_xx |
| 1580 eor r12,r8,r9 @ a^b, b^c in next round |
| 1581 ldr r1,[sp,#11*4] @ from future BODY_16_xx |
| 1582 #endif |
| 1583 eor r0,r0,r8,ror#20 @ Sigma0(a) |
| 1584 and r3,r3,r12 @ (b^c)&=(a^b) |
| 1585 add r11,r11,r7 @ d+=h |
| 1586 eor r3,r3,r9 @ Maj(a,b,c) |
| 1587 add r7,r7,r0,ror#2 @ h+=Sigma0(a) |
| 1588 @ add r7,r7,r3 @ h+=Maj(a,b,c) |
| 1589 @ ldr r2,[sp,#14*4] @ 29 |
| 1590 @ ldr r1,[sp,#11*4] |
| 1591 mov r0,r2,ror#7 |
| 1592 add r7,r7,r3 @ h+=Maj(a,b,c) from the past |
| 1593 mov r3,r1,ror#17 |
| 1594 eor r0,r0,r2,ror#18 |
| 1595 eor r3,r3,r1,ror#19 |
| 1596 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) |
| 1597 ldr r2,[sp,#13*4] |
| 1598 eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) |
| 1599 ldr r1,[sp,#6*4] |
| 1600 |
| 1601 add r3,r3,r0 |
| 1602 eor r0,r11,r11,ror#5 @ from BODY_00_15 |
| 1603 add r2,r2,r3 |
| 1604 eor r0,r0,r11,ror#19 @ Sigma1(e) |
| 1605 add r2,r2,r1 @ X[i] |
| 1606 ldr r3,[r14],#4 @ *K256++ |
| 1607 add r6,r6,r2 @ h+=X[i] |
| 1608 str r2,[sp,#13*4] |
| 1609 eor r2,r4,r5 |
| 1610 add r6,r6,r0,ror#6 @ h+=Sigma1(e) |
| 1611 and r2,r2,r11 |
| 1612 add r6,r6,r3 @ h+=K256[i] |
| 1613 eor r2,r2,r5 @ Ch(e,f,g) |
| 1614 eor r0,r7,r7,ror#11 |
| 1615 add r6,r6,r2 @ h+=Ch(e,f,g) |
| 1616 #if 29==31 |
| 1617 and r3,r3,#0xff |
| 1618 cmp r3,#0xf2 @ done? |
| 1619 #endif |
| 1620 #if 29<15 |
| 1621 # if __ARM_ARCH__>=7 |
| 1622 ldr r2,[r1],#4 @ prefetch |
| 1623 # else |
| 1624 ldrb r2,[r1,#3] |
| 1625 # endif |
| 1626 eor r3,r7,r8 @ a^b, b^c in next round |
| 1627 #else |
| 1628 ldr r2,[sp,#15*4] @ from future BODY_16_xx |
| 1629 eor r3,r7,r8 @ a^b, b^c in next round |
| 1630 ldr r1,[sp,#12*4] @ from future BODY_16_xx |
| 1631 #endif |
| 1632 eor r0,r0,r7,ror#20 @ Sigma0(a) |
| 1633 and r12,r12,r3 @ (b^c)&=(a^b) |
| 1634 add r10,r10,r6 @ d+=h |
| 1635 eor r12,r12,r8 @ Maj(a,b,c) |
| 1636 add r6,r6,r0,ror#2 @ h+=Sigma0(a) |
| 1637 @ add r6,r6,r12 @ h+=Maj(a,b,c) |
| 1638 @ ldr r2,[sp,#15*4] @ 30 |
| 1639 @ ldr r1,[sp,#12*4] |
| 1640 mov r0,r2,ror#7 |
| 1641 add r6,r6,r12 @ h+=Maj(a,b,c) from the past |
| 1642 mov r12,r1,ror#17 |
| 1643 eor r0,r0,r2,ror#18 |
| 1644 eor r12,r12,r1,ror#19 |
| 1645 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) |
| 1646 ldr r2,[sp,#14*4] |
| 1647 eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) |
| 1648 ldr r1,[sp,#7*4] |
| 1649 |
| 1650 add r12,r12,r0 |
| 1651 eor r0,r10,r10,ror#5 @ from BODY_00_15 |
| 1652 add r2,r2,r12 |
| 1653 eor r0,r0,r10,ror#19 @ Sigma1(e) |
| 1654 add r2,r2,r1 @ X[i] |
| 1655 ldr r12,[r14],#4 @ *K256++ |
| 1656 add r5,r5,r2 @ h+=X[i] |
| 1657 str r2,[sp,#14*4] |
| 1658 eor r2,r11,r4 |
| 1659 add r5,r5,r0,ror#6 @ h+=Sigma1(e) |
| 1660 and r2,r2,r10 |
| 1661 add r5,r5,r12 @ h+=K256[i] |
| 1662 eor r2,r2,r4 @ Ch(e,f,g) |
| 1663 eor r0,r6,r6,ror#11 |
| 1664 add r5,r5,r2 @ h+=Ch(e,f,g) |
| 1665 #if 30==31 |
| 1666 and r12,r12,#0xff |
| 1667 cmp r12,#0xf2 @ done? |
| 1668 #endif |
| 1669 #if 30<15 |
| 1670 # if __ARM_ARCH__>=7 |
| 1671 ldr r2,[r1],#4 @ prefetch |
| 1672 # else |
| 1673 ldrb r2,[r1,#3] |
| 1674 # endif |
| 1675 eor r12,r6,r7 @ a^b, b^c in next round |
| 1676 #else |
| 1677 ldr r2,[sp,#0*4] @ from future BODY_16_xx |
| 1678 eor r12,r6,r7 @ a^b, b^c in next round |
| 1679 ldr r1,[sp,#13*4] @ from future BODY_16_xx |
| 1680 #endif |
| 1681 eor r0,r0,r6,ror#20 @ Sigma0(a) |
| 1682 and r3,r3,r12 @ (b^c)&=(a^b) |
| 1683 add r9,r9,r5 @ d+=h |
| 1684 eor r3,r3,r7 @ Maj(a,b,c) |
| 1685 add r5,r5,r0,ror#2 @ h+=Sigma0(a) |
| 1686 @ add r5,r5,r3 @ h+=Maj(a,b,c) |
| 1687 @ ldr r2,[sp,#0*4] @ 31 |
| 1688 @ ldr r1,[sp,#13*4] |
| 1689 mov r0,r2,ror#7 |
| 1690 add r5,r5,r3 @ h+=Maj(a,b,c) from the past |
| 1691 mov r3,r1,ror#17 |
| 1692 eor r0,r0,r2,ror#18 |
| 1693 eor r3,r3,r1,ror#19 |
| 1694 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) |
| 1695 ldr r2,[sp,#15*4] |
| 1696 eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) |
| 1697 ldr r1,[sp,#8*4] |
| 1698 |
| 1699 add r3,r3,r0 |
| 1700 eor r0,r9,r9,ror#5 @ from BODY_00_15 |
| 1701 add r2,r2,r3 |
| 1702 eor r0,r0,r9,ror#19 @ Sigma1(e) |
| 1703 add r2,r2,r1 @ X[i] |
| 1704 ldr r3,[r14],#4 @ *K256++ |
| 1705 add r4,r4,r2 @ h+=X[i] |
| 1706 str r2,[sp,#15*4] |
| 1707 eor r2,r10,r11 |
| 1708 add r4,r4,r0,ror#6 @ h+=Sigma1(e) |
| 1709 and r2,r2,r9 |
| 1710 add r4,r4,r3 @ h+=K256[i] |
| 1711 eor r2,r2,r11 @ Ch(e,f,g) |
| 1712 eor r0,r5,r5,ror#11 |
| 1713 add r4,r4,r2 @ h+=Ch(e,f,g) |
| 1714 #if 31==31 |
| 1715 and r3,r3,#0xff |
| 1716 cmp r3,#0xf2 @ done? |
| 1717 #endif |
| 1718 #if 31<15 |
| 1719 # if __ARM_ARCH__>=7 |
| 1720 ldr r2,[r1],#4 @ prefetch |
| 1721 # else |
| 1722 ldrb r2,[r1,#3] |
| 1723 # endif |
| 1724 eor r3,r5,r6 @ a^b, b^c in next round |
| 1725 #else |
| 1726 ldr r2,[sp,#1*4] @ from future BODY_16_xx |
| 1727 eor r3,r5,r6 @ a^b, b^c in next round |
| 1728 ldr r1,[sp,#14*4] @ from future BODY_16_xx |
| 1729 #endif |
| 1730 eor r0,r0,r5,ror#20 @ Sigma0(a) |
| 1731 and r12,r12,r3 @ (b^c)&=(a^b) |
| 1732 add r8,r8,r4 @ d+=h |
| 1733 eor r12,r12,r6 @ Maj(a,b,c) |
| 1734 add r4,r4,r0,ror#2 @ h+=Sigma0(a) |
| 1735 @ add r4,r4,r12 @ h+=Maj(a,b,c) |
| 1736 ldreq r3,[sp,#16*4] @ pull ctx |
| 1737 bne .Lrounds_16_xx |
| 1738 |
| 1739 add r4,r4,r12 @ h+=Maj(a,b,c) from the past |
| 1740 ldr r0,[r3,#0] |
| 1741 ldr r2,[r3,#4] |
| 1742 ldr r12,[r3,#8] |
| 1743 add r4,r4,r0 |
| 1744 ldr r0,[r3,#12] |
| 1745 add r5,r5,r2 |
| 1746 ldr r2,[r3,#16] |
| 1747 add r6,r6,r12 |
| 1748 ldr r12,[r3,#20] |
| 1749 add r7,r7,r0 |
| 1750 ldr r0,[r3,#24] |
| 1751 add r8,r8,r2 |
| 1752 ldr r2,[r3,#28] |
| 1753 add r9,r9,r12 |
| 1754 ldr r1,[sp,#17*4] @ pull inp |
| 1755 ldr r12,[sp,#18*4] @ pull inp+len |
| 1756 add r10,r10,r0 |
| 1757 add r11,r11,r2 |
| 1758 stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} |
| 1759 cmp r1,r12 |
| 1760 sub r14,r14,#256 @ rewind Ktbl |
| 1761 bne .Loop |
| 1762 |
| 1763 add sp,sp,#19*4 @ destroy frame |
| 1764 #if __ARM_ARCH__>=5 |
| 1765 ldmia sp!,{r4-r11,pc} |
| 1766 #else |
| 1767 ldmia sp!,{r4-r11,lr} |
| 1768 tst lr,#1 |
| 1769 moveq pc,lr @ be binary compatible with V4, yet |
| 1770 .word 0xe12fff1e @ interoperable with Thumb ISA:-) |
| 1771 #endif |
| 1772 #if __ARM_ARCH__>=7 |
| 1773 .fpu neon |
| 1774 .align 4 |
| 1775 .LNEON: |
| 1776 stmdb sp!,{r4-r12,lr} |
| 1777 |
| 1778 mov r12,sp |
| 1779 sub sp,sp,#16*4+16 @ alloca |
| 1780 sub r14,r3,#256+32 @ K256 |
| 1781 bic sp,sp,#15 @ align for 128-bit stores |
| 1782 |
| 1783 vld1.8 {q0},[r1]! |
| 1784 vld1.8 {q1},[r1]! |
| 1785 vld1.8 {q2},[r1]! |
| 1786 vld1.8 {q3},[r1]! |
| 1787 vld1.32 {q8},[r14,:128]! |
| 1788 vld1.32 {q9},[r14,:128]! |
| 1789 vld1.32 {q10},[r14,:128]! |
| 1790 vld1.32 {q11},[r14,:128]! |
| 1791 vrev32.8 q0,q0 @ yes, even on |
| 1792 str r0,[sp,#64] |
| 1793 vrev32.8 q1,q1 @ big-endian |
| 1794 str r1,[sp,#68] |
| 1795 mov r1,sp |
| 1796 vrev32.8 q2,q2 |
| 1797 str r2,[sp,#72] |
| 1798 vrev32.8 q3,q3 |
| 1799 str r12,[sp,#76] @ save original sp |
| 1800 vadd.i32 q8,q8,q0 |
| 1801 vadd.i32 q9,q9,q1 |
| 1802 vst1.32 {q8},[r1,:128]! |
| 1803 vadd.i32 q10,q10,q2 |
| 1804 vst1.32 {q9},[r1,:128]! |
| 1805 vadd.i32 q11,q11,q3 |
| 1806 vst1.32 {q10},[r1,:128]! |
| 1807 vst1.32 {q11},[r1,:128]! |
| 1808 |
| 1809 ldmia r0,{r4-r11} |
| 1810 sub r1,r1,#64 |
| 1811 ldr r2,[sp,#0] |
| 1812 eor r12,r12,r12 |
| 1813 eor r3,r5,r6 |
| 1814 b .L_00_48 |
| 1815 |
| 1816 .align 4 |
| 1817 .L_00_48: |
| 1818 vext.8 q8,q0,q1,#4 |
| 1819 add r11,r11,r2 |
| 1820 eor r2,r9,r10 |
| 1821 eor r0,r8,r8,ror#5 |
| 1822 vext.8 q9,q2,q3,#4 |
| 1823 add r4,r4,r12 |
| 1824 and r2,r2,r8 |
| 1825 eor r12,r0,r8,ror#19 |
| 1826 vshr.u32 q10,q8,#7 |
| 1827 eor r0,r4,r4,ror#11 |
| 1828 eor r2,r2,r10 |
| 1829 vadd.i32 q0,q0,q9 |
| 1830 add r11,r11,r12,ror#6 |
| 1831 eor r12,r4,r5 |
| 1832 vshr.u32 q9,q8,#3 |
| 1833 eor r0,r0,r4,ror#20 |
| 1834 add r11,r11,r2 |
| 1835 vsli.32 q10,q8,#25 |
| 1836 ldr r2,[sp,#4] |
| 1837 and r3,r3,r12 |
| 1838 vshr.u32 q11,q8,#18 |
| 1839 add r7,r7,r11 |
| 1840 add r11,r11,r0,ror#2 |
| 1841 eor r3,r3,r5 |
| 1842 veor q9,q9,q10 |
| 1843 add r10,r10,r2 |
| 1844 vsli.32 q11,q8,#14 |
| 1845 eor r2,r8,r9 |
| 1846 eor r0,r7,r7,ror#5 |
| 1847 vshr.u32 d24,d7,#17 |
| 1848 add r11,r11,r3 |
| 1849 and r2,r2,r7 |
| 1850 veor q9,q9,q11 |
| 1851 eor r3,r0,r7,ror#19 |
| 1852 eor r0,r11,r11,ror#11 |
| 1853 vsli.32 d24,d7,#15 |
| 1854 eor r2,r2,r9 |
| 1855 add r10,r10,r3,ror#6 |
| 1856 vshr.u32 d25,d7,#10 |
| 1857 eor r3,r11,r4 |
| 1858 eor r0,r0,r11,ror#20 |
| 1859 vadd.i32 q0,q0,q9 |
| 1860 add r10,r10,r2 |
| 1861 ldr r2,[sp,#8] |
| 1862 veor d25,d25,d24 |
| 1863 and r12,r12,r3 |
| 1864 add r6,r6,r10 |
| 1865 vshr.u32 d24,d7,#19 |
| 1866 add r10,r10,r0,ror#2 |
| 1867 eor r12,r12,r4 |
| 1868 vsli.32 d24,d7,#13 |
| 1869 add r9,r9,r2 |
| 1870 eor r2,r7,r8 |
| 1871 veor d25,d25,d24 |
| 1872 eor r0,r6,r6,ror#5 |
| 1873 add r10,r10,r12 |
| 1874 vadd.i32 d0,d0,d25 |
| 1875 and r2,r2,r6 |
| 1876 eor r12,r0,r6,ror#19 |
| 1877 vshr.u32 d24,d0,#17 |
| 1878 eor r0,r10,r10,ror#11 |
| 1879 eor r2,r2,r8 |
| 1880 vsli.32 d24,d0,#15 |
| 1881 add r9,r9,r12,ror#6 |
| 1882 eor r12,r10,r11 |
| 1883 vshr.u32 d25,d0,#10 |
| 1884 eor r0,r0,r10,ror#20 |
| 1885 add r9,r9,r2 |
| 1886 veor d25,d25,d24 |
| 1887 ldr r2,[sp,#12] |
| 1888 and r3,r3,r12 |
| 1889 vshr.u32 d24,d0,#19 |
| 1890 add r5,r5,r9 |
| 1891 add r9,r9,r0,ror#2 |
| 1892 eor r3,r3,r11 |
| 1893 vld1.32 {q8},[r14,:128]! |
| 1894 add r8,r8,r2 |
| 1895 vsli.32 d24,d0,#13 |
| 1896 eor r2,r6,r7 |
| 1897 eor r0,r5,r5,ror#5 |
| 1898 veor d25,d25,d24 |
| 1899 add r9,r9,r3 |
| 1900 and r2,r2,r5 |
| 1901 vadd.i32 d1,d1,d25 |
| 1902 eor r3,r0,r5,ror#19 |
| 1903 eor r0,r9,r9,ror#11 |
| 1904 vadd.i32 q8,q8,q0 |
| 1905 eor r2,r2,r7 |
| 1906 add r8,r8,r3,ror#6 |
| 1907 eor r3,r9,r10 |
| 1908 eor r0,r0,r9,ror#20 |
| 1909 add r8,r8,r2 |
| 1910 ldr r2,[sp,#16] |
| 1911 and r12,r12,r3 |
| 1912 add r4,r4,r8 |
| 1913 vst1.32 {q8},[r1,:128]! |
| 1914 add r8,r8,r0,ror#2 |
| 1915 eor r12,r12,r10 |
| 1916 vext.8 q8,q1,q2,#4 |
| 1917 add r7,r7,r2 |
| 1918 eor r2,r5,r6 |
| 1919 eor r0,r4,r4,ror#5 |
| 1920 vext.8 q9,q3,q0,#4 |
| 1921 add r8,r8,r12 |
| 1922 and r2,r2,r4 |
| 1923 eor r12,r0,r4,ror#19 |
| 1924 vshr.u32 q10,q8,#7 |
| 1925 eor r0,r8,r8,ror#11 |
| 1926 eor r2,r2,r6 |
| 1927 vadd.i32 q1,q1,q9 |
| 1928 add r7,r7,r12,ror#6 |
| 1929 eor r12,r8,r9 |
| 1930 vshr.u32 q9,q8,#3 |
| 1931 eor r0,r0,r8,ror#20 |
| 1932 add r7,r7,r2 |
| 1933 vsli.32 q10,q8,#25 |
| 1934 ldr r2,[sp,#20] |
| 1935 and r3,r3,r12 |
| 1936 vshr.u32 q11,q8,#18 |
| 1937 add r11,r11,r7 |
| 1938 add r7,r7,r0,ror#2 |
| 1939 eor r3,r3,r9 |
| 1940 veor q9,q9,q10 |
| 1941 add r6,r6,r2 |
| 1942 vsli.32 q11,q8,#14 |
| 1943 eor r2,r4,r5 |
| 1944 eor r0,r11,r11,ror#5 |
| 1945 vshr.u32 d24,d1,#17 |
| 1946 add r7,r7,r3 |
| 1947 and r2,r2,r11 |
| 1948 veor q9,q9,q11 |
| 1949 eor r3,r0,r11,ror#19 |
| 1950 eor r0,r7,r7,ror#11 |
| 1951 vsli.32 d24,d1,#15 |
| 1952 eor r2,r2,r5 |
| 1953 add r6,r6,r3,ror#6 |
| 1954 vshr.u32 d25,d1,#10 |
| 1955 eor r3,r7,r8 |
| 1956 eor r0,r0,r7,ror#20 |
| 1957 vadd.i32 q1,q1,q9 |
| 1958 add r6,r6,r2 |
| 1959 ldr r2,[sp,#24] |
| 1960 veor d25,d25,d24 |
| 1961 and r12,r12,r3 |
| 1962 add r10,r10,r6 |
| 1963 vshr.u32 d24,d1,#19 |
| 1964 add r6,r6,r0,ror#2 |
| 1965 eor r12,r12,r8 |
| 1966 vsli.32 d24,d1,#13 |
| 1967 add r5,r5,r2 |
| 1968 eor r2,r11,r4 |
| 1969 veor d25,d25,d24 |
| 1970 eor r0,r10,r10,ror#5 |
| 1971 add r6,r6,r12 |
| 1972 vadd.i32 d2,d2,d25 |
| 1973 and r2,r2,r10 |
| 1974 eor r12,r0,r10,ror#19 |
| 1975 vshr.u32 d24,d2,#17 |
| 1976 eor r0,r6,r6,ror#11 |
| 1977 eor r2,r2,r4 |
| 1978 vsli.32 d24,d2,#15 |
| 1979 add r5,r5,r12,ror#6 |
| 1980 eor r12,r6,r7 |
| 1981 vshr.u32 d25,d2,#10 |
| 1982 eor r0,r0,r6,ror#20 |
| 1983 add r5,r5,r2 |
| 1984 veor d25,d25,d24 |
| 1985 ldr r2,[sp,#28] |
| 1986 and r3,r3,r12 |
| 1987 vshr.u32 d24,d2,#19 |
| 1988 add r9,r9,r5 |
| 1989 add r5,r5,r0,ror#2 |
| 1990 eor r3,r3,r7 |
| 1991 vld1.32 {q8},[r14,:128]! |
| 1992 add r4,r4,r2 |
| 1993 vsli.32 d24,d2,#13 |
| 1994 eor r2,r10,r11 |
| 1995 eor r0,r9,r9,ror#5 |
| 1996 veor d25,d25,d24 |
| 1997 add r5,r5,r3 |
| 1998 and r2,r2,r9 |
| 1999 vadd.i32 d3,d3,d25 |
| 2000 eor r3,r0,r9,ror#19 |
| 2001 eor r0,r5,r5,ror#11 |
| 2002 vadd.i32 q8,q8,q1 |
| 2003 eor r2,r2,r11 |
| 2004 add r4,r4,r3,ror#6 |
| 2005 eor r3,r5,r6 |
| 2006 eor r0,r0,r5,ror#20 |
| 2007 add r4,r4,r2 |
| 2008 ldr r2,[sp,#32] |
| 2009 and r12,r12,r3 |
| 2010 add r8,r8,r4 |
| 2011 vst1.32 {q8},[r1,:128]! |
| 2012 add r4,r4,r0,ror#2 |
| 2013 eor r12,r12,r6 |
| 2014 vext.8 q8,q2,q3,#4 |
| 2015 add r11,r11,r2 |
| 2016 eor r2,r9,r10 |
| 2017 eor r0,r8,r8,ror#5 |
| 2018 vext.8 q9,q0,q1,#4 |
| 2019 add r4,r4,r12 |
| 2020 and r2,r2,r8 |
| 2021 eor r12,r0,r8,ror#19 |
| 2022 vshr.u32 q10,q8,#7 |
| 2023 eor r0,r4,r4,ror#11 |
| 2024 eor r2,r2,r10 |
| 2025 vadd.i32 q2,q2,q9 |
| 2026 add r11,r11,r12,ror#6 |
| 2027 eor r12,r4,r5 |
| 2028 vshr.u32 q9,q8,#3 |
| 2029 eor r0,r0,r4,ror#20 |
| 2030 add r11,r11,r2 |
| 2031 vsli.32 q10,q8,#25 |
| 2032 ldr r2,[sp,#36] |
| 2033 and r3,r3,r12 |
| 2034 vshr.u32 q11,q8,#18 |
| 2035 add r7,r7,r11 |
| 2036 add r11,r11,r0,ror#2 |
| 2037 eor r3,r3,r5 |
| 2038 veor q9,q9,q10 |
| 2039 add r10,r10,r2 |
| 2040 vsli.32 q11,q8,#14 |
| 2041 eor r2,r8,r9 |
| 2042 eor r0,r7,r7,ror#5 |
| 2043 vshr.u32 d24,d3,#17 |
| 2044 add r11,r11,r3 |
| 2045 and r2,r2,r7 |
| 2046 veor q9,q9,q11 |
| 2047 eor r3,r0,r7,ror#19 |
| 2048 eor r0,r11,r11,ror#11 |
| 2049 vsli.32 d24,d3,#15 |
| 2050 eor r2,r2,r9 |
| 2051 add r10,r10,r3,ror#6 |
| 2052 vshr.u32 d25,d3,#10 |
| 2053 eor r3,r11,r4 |
| 2054 eor r0,r0,r11,ror#20 |
| 2055 vadd.i32 q2,q2,q9 |
| 2056 add r10,r10,r2 |
| 2057 ldr r2,[sp,#40] |
| 2058 veor d25,d25,d24 |
| 2059 and r12,r12,r3 |
| 2060 add r6,r6,r10 |
| 2061 vshr.u32 d24,d3,#19 |
| 2062 add r10,r10,r0,ror#2 |
| 2063 eor r12,r12,r4 |
| 2064 vsli.32 d24,d3,#13 |
| 2065 add r9,r9,r2 |
| 2066 eor r2,r7,r8 |
| 2067 veor d25,d25,d24 |
| 2068 eor r0,r6,r6,ror#5 |
| 2069 add r10,r10,r12 |
| 2070 vadd.i32 d4,d4,d25 |
| 2071 and r2,r2,r6 |
| 2072 eor r12,r0,r6,ror#19 |
| 2073 vshr.u32 d24,d4,#17 |
| 2074 eor r0,r10,r10,ror#11 |
| 2075 eor r2,r2,r8 |
| 2076 vsli.32 d24,d4,#15 |
| 2077 add r9,r9,r12,ror#6 |
| 2078 eor r12,r10,r11 |
| 2079 vshr.u32 d25,d4,#10 |
| 2080 eor r0,r0,r10,ror#20 |
| 2081 add r9,r9,r2 |
| 2082 veor d25,d25,d24 |
| 2083 ldr r2,[sp,#44] |
| 2084 and r3,r3,r12 |
| 2085 vshr.u32 d24,d4,#19 |
| 2086 add r5,r5,r9 |
| 2087 add r9,r9,r0,ror#2 |
| 2088 eor r3,r3,r11 |
| 2089 vld1.32 {q8},[r14,:128]! |
| 2090 add r8,r8,r2 |
| 2091 vsli.32 d24,d4,#13 |
| 2092 eor r2,r6,r7 |
| 2093 eor r0,r5,r5,ror#5 |
| 2094 veor d25,d25,d24 |
| 2095 add r9,r9,r3 |
| 2096 and r2,r2,r5 |
| 2097 vadd.i32 d5,d5,d25 |
| 2098 eor r3,r0,r5,ror#19 |
| 2099 eor r0,r9,r9,ror#11 |
| 2100 vadd.i32 q8,q8,q2 |
| 2101 eor r2,r2,r7 |
| 2102 add r8,r8,r3,ror#6 |
| 2103 eor r3,r9,r10 |
| 2104 eor r0,r0,r9,ror#20 |
| 2105 add r8,r8,r2 |
| 2106 ldr r2,[sp,#48] |
| 2107 and r12,r12,r3 |
| 2108 add r4,r4,r8 |
| 2109 vst1.32 {q8},[r1,:128]! |
| 2110 add r8,r8,r0,ror#2 |
| 2111 eor r12,r12,r10 |
@ Last schedule group of the 48-round expansion phase: vext.8 builds
@ the misaligned W[i-15]/W[i-7] input vectors, vshr/vsli by 7/18/3
@ compute sigma0, and the d5/d6/d7 ops (17/19/10 rotations) compute
@ sigma1, accumulating into q3.  At the end r2 is loaded straight
@ from [r14]: a zero there is the terminator word placed after K256
@ (see the table at the top of the file), which ends the schedule
@ loop.
| 2112 vext.8 q8,q3,q0,#4
| 2113 add r7,r7,r2
| 2114 eor r2,r5,r6
| 2115 eor r0,r4,r4,ror#5
| 2116 vext.8 q9,q1,q2,#4
| 2117 add r8,r8,r12
| 2118 and r2,r2,r4
| 2119 eor r12,r0,r4,ror#19
@ sigma0 rotations of W[i-15]
| 2120 vshr.u32 q10,q8,#7
| 2121 eor r0,r8,r8,ror#11
| 2122 eor r2,r2,r6
@ q3 += W[i-7]
| 2123 vadd.i32 q3,q3,q9
| 2124 add r7,r7,r12,ror#6
| 2125 eor r12,r8,r9
| 2126 vshr.u32 q9,q8,#3
| 2127 eor r0,r0,r8,ror#20
| 2128 add r7,r7,r2
| 2129 vsli.32 q10,q8,#25
| 2130 ldr r2,[sp,#52]
| 2131 and r3,r3,r12
| 2132 vshr.u32 q11,q8,#18
| 2133 add r11,r11,r7
| 2134 add r7,r7,r0,ror#2
| 2135 eor r3,r3,r9
| 2136 veor q9,q9,q10
| 2137 add r6,r6,r2
| 2138 vsli.32 q11,q8,#14
| 2139 eor r2,r4,r5
| 2140 eor r0,r11,r11,ror#5
@ sigma1 of the two most recent words (d5)
| 2141 vshr.u32 d24,d5,#17
| 2142 add r7,r7,r3
| 2143 and r2,r2,r11
| 2144 veor q9,q9,q11
| 2145 eor r3,r0,r11,ror#19
| 2146 eor r0,r7,r7,ror#11
| 2147 vsli.32 d24,d5,#15
| 2148 eor r2,r2,r5
| 2149 add r6,r6,r3,ror#6
| 2150 vshr.u32 d25,d5,#10
| 2151 eor r3,r7,r8
| 2152 eor r0,r0,r7,ror#20
@ q3 += sigma0(W[i-15])
| 2153 vadd.i32 q3,q3,q9
| 2154 add r6,r6,r2
| 2155 ldr r2,[sp,#56]
| 2156 veor d25,d25,d24
| 2157 and r12,r12,r3
| 2158 add r10,r10,r6
| 2159 vshr.u32 d24,d5,#19
| 2160 add r6,r6,r0,ror#2
| 2161 eor r12,r12,r8
| 2162 vsli.32 d24,d5,#13
| 2163 add r5,r5,r2
| 2164 eor r2,r11,r4
| 2165 veor d25,d25,d24
| 2166 eor r0,r10,r10,ror#5
| 2167 add r6,r6,r12
| 2168 vadd.i32 d6,d6,d25
| 2169 and r2,r2,r10
| 2170 eor r12,r0,r10,ror#19
| 2171 vshr.u32 d24,d6,#17
| 2172 eor r0,r6,r6,ror#11
| 2173 eor r2,r2,r4
| 2174 vsli.32 d24,d6,#15
| 2175 add r5,r5,r12,ror#6
| 2176 eor r12,r6,r7
| 2177 vshr.u32 d25,d6,#10
| 2178 eor r0,r0,r6,ror#20
| 2179 add r5,r5,r2
| 2180 veor d25,d25,d24
| 2181 ldr r2,[sp,#60]
| 2182 and r3,r3,r12
| 2183 vshr.u32 d24,d6,#19
| 2184 add r9,r9,r5
| 2185 add r5,r5,r0,ror#2
| 2186 eor r3,r3,r7
| 2187 vld1.32 {q8},[r14,:128]!
| 2188 add r4,r4,r2
| 2189 vsli.32 d24,d6,#13
| 2190 eor r2,r10,r11
| 2191 eor r0,r9,r9,ror#5
| 2192 veor d25,d25,d24
| 2193 add r5,r5,r3
| 2194 and r2,r2,r9
| 2195 vadd.i32 d7,d7,d25
| 2196 eor r3,r0,r9,ror#19
| 2197 eor r0,r5,r5,ror#11
@ K + scheduled q3 group
| 2198 vadd.i32 q8,q8,q3
| 2199 eor r2,r2,r11
| 2200 add r4,r4,r3,ror#6
| 2201 eor r3,r5,r6
| 2202 eor r0,r0,r5,ror#20
| 2203 add r4,r4,r2
@ peek at the next K word without advancing r14
| 2204 ldr r2,[r14]
| 2205 and r12,r12,r3
| 2206 add r8,r8,r4
| 2207 vst1.32 {q8},[r1,:128]!
| 2208 add r4,r4,r0,ror#2
| 2209 eor r12,r12,r6
| 2210 teq r2,#0 @ check for K256 terminator
| 2211 ldr r2,[sp,#0]
@ r1 overshot the 64-byte W+K scratch ring; wrap it back
| 2212 sub r1,r1,#64
| 2213 bne .L_00_48
@ End of one 64-byte block: reload the saved input pointer ([sp,#68])
@ and end-of-input pointer ([sp,#72]; cf. "len to point at the end of
@ inp" in the prologue) and speculatively preload the NEXT block.
@ If the block just processed was the last one (r1==r0), back the
@ pointer up 64 bytes so these loads stay inside the caller's buffer
@ ("avoid SEGV"), and skip storing the advanced pointer (strne).
@ Byte order of the loaded data is fixed up later with vrev32.8.
| 2214 
| 2215 ldr r1,[sp,#68]
| 2216 ldr r0,[sp,#72]
| 2217 sub r14,r14,#256 @ rewind r14
@ EQ here means "no more input" — the flags are consumed much later
| 2218 teq r1,r0
| 2219 subeq r1,r1,#64 @ avoid SEGV
| 2220 vld1.8 {q0},[r1]! @ load next input block
| 2221 vld1.8 {q1},[r1]!
| 2222 vld1.8 {q2},[r1]!
| 2223 vld1.8 {q3},[r1]!
| 2224 strne r1,[sp,#68]
@ r1 now indexes the stack W+K scratch area again
| 2225 mov r1,sp
@ Rounds 48-63: the full W+K schedule for the current block is
@ already parked on the stack, so no more expansion is needed.
@ The NEON unit is instead used to prepare the NEXT block: each
@ group of four scalar rounds is interleaved with one
@ vld1.32 {q8} (K constants) / vrev32.8 qN (endian swap of the
@ preloaded input) / vadd.i32 q8,q8,qN (pre-add K) /
@ vst1.32 {q8},[r1] (store W+K) sequence.  None of these touch the
@ condition flags, which must survive until the movne/ldreq block
@ that follows the accumulation.
| 2226 add r11,r11,r2
| 2227 eor r2,r9,r10
| 2228 eor r0,r8,r8,ror#5
| 2229 add r4,r4,r12
| 2230 vld1.32 {q8},[r14,:128]!
| 2231 and r2,r2,r8
| 2232 eor r12,r0,r8,ror#19
| 2233 eor r0,r4,r4,ror#11
| 2234 eor r2,r2,r10
@ big-endian fixup of next block, words 0-3
| 2235 vrev32.8 q0,q0
| 2236 add r11,r11,r12,ror#6
| 2237 eor r12,r4,r5
| 2238 eor r0,r0,r4,ror#20
| 2239 add r11,r11,r2
| 2240 vadd.i32 q8,q8,q0
| 2241 ldr r2,[sp,#4]
| 2242 and r3,r3,r12
| 2243 add r7,r7,r11
| 2244 add r11,r11,r0,ror#2
| 2245 eor r3,r3,r5
| 2246 add r10,r10,r2
| 2247 eor r2,r8,r9
| 2248 eor r0,r7,r7,ror#5
| 2249 add r11,r11,r3
| 2250 and r2,r2,r7
| 2251 eor r3,r0,r7,ror#19
| 2252 eor r0,r11,r11,ror#11
| 2253 eor r2,r2,r9
| 2254 add r10,r10,r3,ror#6
| 2255 eor r3,r11,r4
| 2256 eor r0,r0,r11,ror#20
| 2257 add r10,r10,r2
| 2258 ldr r2,[sp,#8]
| 2259 and r12,r12,r3
| 2260 add r6,r6,r10
| 2261 add r10,r10,r0,ror#2
| 2262 eor r12,r12,r4
| 2263 add r9,r9,r2
| 2264 eor r2,r7,r8
| 2265 eor r0,r6,r6,ror#5
| 2266 add r10,r10,r12
| 2267 and r2,r2,r6
| 2268 eor r12,r0,r6,ror#19
| 2269 eor r0,r10,r10,ror#11
| 2270 eor r2,r2,r8
| 2271 add r9,r9,r12,ror#6
| 2272 eor r12,r10,r11
| 2273 eor r0,r0,r10,ror#20
| 2274 add r9,r9,r2
| 2275 ldr r2,[sp,#12]
| 2276 and r3,r3,r12
| 2277 add r5,r5,r9
| 2278 add r9,r9,r0,ror#2
| 2279 eor r3,r3,r11
| 2280 add r8,r8,r2
| 2281 eor r2,r6,r7
| 2282 eor r0,r5,r5,ror#5
| 2283 add r9,r9,r3
| 2284 and r2,r2,r5
| 2285 eor r3,r0,r5,ror#19
| 2286 eor r0,r9,r9,ror#11
| 2287 eor r2,r2,r7
| 2288 add r8,r8,r3,ror#6
| 2289 eor r3,r9,r10
| 2290 eor r0,r0,r9,ror#20
| 2291 add r8,r8,r2
| 2292 ldr r2,[sp,#16]
| 2293 and r12,r12,r3
| 2294 add r4,r4,r8
| 2295 add r8,r8,r0,ror#2
| 2296 eor r12,r12,r10
@ store next-block W+K group 0
| 2297 vst1.32 {q8},[r1,:128]!
| 2298 add r7,r7,r2
| 2299 eor r2,r5,r6
| 2300 eor r0,r4,r4,ror#5
| 2301 add r8,r8,r12
| 2302 vld1.32 {q8},[r14,:128]!
| 2303 and r2,r2,r4
| 2304 eor r12,r0,r4,ror#19
| 2305 eor r0,r8,r8,ror#11
| 2306 eor r2,r2,r6
| 2307 vrev32.8 q1,q1
| 2308 add r7,r7,r12,ror#6
| 2309 eor r12,r8,r9
| 2310 eor r0,r0,r8,ror#20
| 2311 add r7,r7,r2
| 2312 vadd.i32 q8,q8,q1
| 2313 ldr r2,[sp,#20]
| 2314 and r3,r3,r12
| 2315 add r11,r11,r7
| 2316 add r7,r7,r0,ror#2
| 2317 eor r3,r3,r9
| 2318 add r6,r6,r2
| 2319 eor r2,r4,r5
| 2320 eor r0,r11,r11,ror#5
| 2321 add r7,r7,r3
| 2322 and r2,r2,r11
| 2323 eor r3,r0,r11,ror#19
| 2324 eor r0,r7,r7,ror#11
| 2325 eor r2,r2,r5
| 2326 add r6,r6,r3,ror#6
| 2327 eor r3,r7,r8
| 2328 eor r0,r0,r7,ror#20
| 2329 add r6,r6,r2
| 2330 ldr r2,[sp,#24]
| 2331 and r12,r12,r3
| 2332 add r10,r10,r6
| 2333 add r6,r6,r0,ror#2
| 2334 eor r12,r12,r8
| 2335 add r5,r5,r2
| 2336 eor r2,r11,r4
| 2337 eor r0,r10,r10,ror#5
| 2338 add r6,r6,r12
| 2339 and r2,r2,r10
| 2340 eor r12,r0,r10,ror#19
| 2341 eor r0,r6,r6,ror#11
| 2342 eor r2,r2,r4
| 2343 add r5,r5,r12,ror#6
| 2344 eor r12,r6,r7
| 2345 eor r0,r0,r6,ror#20
| 2346 add r5,r5,r2
| 2347 ldr r2,[sp,#28]
| 2348 and r3,r3,r12
| 2349 add r9,r9,r5
| 2350 add r5,r5,r0,ror#2
| 2351 eor r3,r3,r7
| 2352 add r4,r4,r2
| 2353 eor r2,r10,r11
| 2354 eor r0,r9,r9,ror#5
| 2355 add r5,r5,r3
| 2356 and r2,r2,r9
| 2357 eor r3,r0,r9,ror#19
| 2358 eor r0,r5,r5,ror#11
| 2359 eor r2,r2,r11
| 2360 add r4,r4,r3,ror#6
| 2361 eor r3,r5,r6
| 2362 eor r0,r0,r5,ror#20
| 2363 add r4,r4,r2
| 2364 ldr r2,[sp,#32]
| 2365 and r12,r12,r3
| 2366 add r8,r8,r4
| 2367 add r4,r4,r0,ror#2
| 2368 eor r12,r12,r6
@ store next-block W+K group 1
| 2369 vst1.32 {q8},[r1,:128]!
| 2370 add r11,r11,r2
| 2371 eor r2,r9,r10
| 2372 eor r0,r8,r8,ror#5
| 2373 add r4,r4,r12
| 2374 vld1.32 {q8},[r14,:128]!
| 2375 and r2,r2,r8
| 2376 eor r12,r0,r8,ror#19
| 2377 eor r0,r4,r4,ror#11
| 2378 eor r2,r2,r10
| 2379 vrev32.8 q2,q2
| 2380 add r11,r11,r12,ror#6
| 2381 eor r12,r4,r5
| 2382 eor r0,r0,r4,ror#20
| 2383 add r11,r11,r2
| 2384 vadd.i32 q8,q8,q2
| 2385 ldr r2,[sp,#36]
| 2386 and r3,r3,r12
| 2387 add r7,r7,r11
| 2388 add r11,r11,r0,ror#2
| 2389 eor r3,r3,r5
| 2390 add r10,r10,r2
| 2391 eor r2,r8,r9
| 2392 eor r0,r7,r7,ror#5
| 2393 add r11,r11,r3
| 2394 and r2,r2,r7
| 2395 eor r3,r0,r7,ror#19
| 2396 eor r0,r11,r11,ror#11
| 2397 eor r2,r2,r9
| 2398 add r10,r10,r3,ror#6
| 2399 eor r3,r11,r4
| 2400 eor r0,r0,r11,ror#20
| 2401 add r10,r10,r2
| 2402 ldr r2,[sp,#40]
| 2403 and r12,r12,r3
| 2404 add r6,r6,r10
| 2405 add r10,r10,r0,ror#2
| 2406 eor r12,r12,r4
| 2407 add r9,r9,r2
| 2408 eor r2,r7,r8
| 2409 eor r0,r6,r6,ror#5
| 2410 add r10,r10,r12
| 2411 and r2,r2,r6
| 2412 eor r12,r0,r6,ror#19
| 2413 eor r0,r10,r10,ror#11
| 2414 eor r2,r2,r8
| 2415 add r9,r9,r12,ror#6
| 2416 eor r12,r10,r11
| 2417 eor r0,r0,r10,ror#20
| 2418 add r9,r9,r2
| 2419 ldr r2,[sp,#44]
| 2420 and r3,r3,r12
| 2421 add r5,r5,r9
| 2422 add r9,r9,r0,ror#2
| 2423 eor r3,r3,r11
| 2424 add r8,r8,r2
| 2425 eor r2,r6,r7
| 2426 eor r0,r5,r5,ror#5
| 2427 add r9,r9,r3
| 2428 and r2,r2,r5
| 2429 eor r3,r0,r5,ror#19
| 2430 eor r0,r9,r9,ror#11
| 2431 eor r2,r2,r7
| 2432 add r8,r8,r3,ror#6
| 2433 eor r3,r9,r10
| 2434 eor r0,r0,r9,ror#20
| 2435 add r8,r8,r2
| 2436 ldr r2,[sp,#48]
| 2437 and r12,r12,r3
| 2438 add r4,r4,r8
| 2439 add r8,r8,r0,ror#2
| 2440 eor r12,r12,r10
@ store next-block W+K group 2
| 2441 vst1.32 {q8},[r1,:128]!
| 2442 add r7,r7,r2
| 2443 eor r2,r5,r6
| 2444 eor r0,r4,r4,ror#5
| 2445 add r8,r8,r12
| 2446 vld1.32 {q8},[r14,:128]!
| 2447 and r2,r2,r4
| 2448 eor r12,r0,r4,ror#19
| 2449 eor r0,r8,r8,ror#11
| 2450 eor r2,r2,r6
| 2451 vrev32.8 q3,q3
| 2452 add r7,r7,r12,ror#6
| 2453 eor r12,r8,r9
| 2454 eor r0,r0,r8,ror#20
| 2455 add r7,r7,r2
| 2456 vadd.i32 q8,q8,q3
| 2457 ldr r2,[sp,#52]
| 2458 and r3,r3,r12
| 2459 add r11,r11,r7
| 2460 add r7,r7,r0,ror#2
| 2461 eor r3,r3,r9
| 2462 add r6,r6,r2
| 2463 eor r2,r4,r5
| 2464 eor r0,r11,r11,ror#5
| 2465 add r7,r7,r3
| 2466 and r2,r2,r11
| 2467 eor r3,r0,r11,ror#19
| 2468 eor r0,r7,r7,ror#11
| 2469 eor r2,r2,r5
| 2470 add r6,r6,r3,ror#6
| 2471 eor r3,r7,r8
| 2472 eor r0,r0,r7,ror#20
| 2473 add r6,r6,r2
| 2474 ldr r2,[sp,#56]
| 2475 and r12,r12,r3
| 2476 add r10,r10,r6
| 2477 add r6,r6,r0,ror#2
| 2478 eor r12,r12,r8
| 2479 add r5,r5,r2
| 2480 eor r2,r11,r4
| 2481 eor r0,r10,r10,ror#5
| 2482 add r6,r6,r12
| 2483 and r2,r2,r10
| 2484 eor r12,r0,r10,ror#19
| 2485 eor r0,r6,r6,ror#11
| 2486 eor r2,r2,r4
| 2487 add r5,r5,r12,ror#6
| 2488 eor r12,r6,r7
| 2489 eor r0,r0,r6,ror#20
| 2490 add r5,r5,r2
| 2491 ldr r2,[sp,#60]
| 2492 and r3,r3,r12
| 2493 add r9,r9,r5
| 2494 add r5,r5,r0,ror#2
| 2495 eor r3,r3,r7
| 2496 add r4,r4,r2
| 2497 eor r2,r10,r11
| 2498 eor r0,r9,r9,ror#5
| 2499 add r5,r5,r3
| 2500 and r2,r2,r9
| 2501 eor r3,r0,r9,ror#19
| 2502 eor r0,r5,r5,ror#11
| 2503 eor r2,r2,r11
| 2504 add r4,r4,r3,ror#6
| 2505 eor r3,r5,r6
| 2506 eor r0,r0,r5,ror#20
| 2507 add r4,r4,r2
@ [sp,#64] = saved context pointer
@ NOTE(review): stack layout comes from the NEON prologue outside
@ this excerpt — verify offsets #64/#68/#72/#76 against it
| 2508 ldr r2,[sp,#64]
| 2509 and r12,r12,r3
| 2510 add r8,r8,r4
| 2511 add r4,r4,r0,ror#2
| 2512 eor r12,r12,r6
@ store next-block W+K group 3
| 2513 vst1.32 {q8},[r1,:128]!
@ Accumulate the working variables (r4-r11 = a-h) into the eight
@ hash words at r2, interleaving loads, adds and write-back stores.
@ The NE/EQ conditions below still reflect the earlier
@ "teq r1,r0" last-block test — none of the intervening loads,
@ adds or NEON ops set the flags.
| 2514 ldr r0,[r2,#0]
| 2515 add r4,r4,r12 @ h+=Maj(a,b,c) from the past
| 2516 ldr r12,[r2,#4]
| 2517 ldr r3,[r2,#8]
| 2518 ldr r1,[r2,#12]
| 2519 add r4,r4,r0 @ accumulate
| 2520 ldr r0,[r2,#16]
| 2521 add r5,r5,r12
| 2522 ldr r12,[r2,#20]
| 2523 add r6,r6,r3
| 2524 ldr r3,[r2,#24]
| 2525 add r7,r7,r1
| 2526 ldr r1,[r2,#28]
| 2527 add r8,r8,r0
| 2528 str r4,[r2],#4
| 2529 add r9,r9,r12
| 2530 str r5,[r2],#4
| 2531 add r10,r10,r3
| 2532 str r6,[r2],#4
| 2533 add r11,r11,r1
| 2534 str r7,[r2],#4
| 2535 stmia r2,{r8-r11}
| 2536 
@ more input: r1 -> W+K scratch, reload W[0]+K[0], clear the Maj
@ accumulator and recompute b^c (cf. "magic" in the prologue),
@ then run the next block's rounds
| 2537 movne r1,sp
| 2538 ldrne r2,[sp,#0]
| 2539 eorne r12,r12,r12
| 2540 ldreq sp,[sp,#76] @ restore original sp
| 2541 eorne r3,r5,r6
| 2542 bne .L_00_48
| 2543 
@ done: pop callee-saved regs and return (pc <- saved lr)
| 2544 ldmia sp!,{r4-r12,pc}
| 2545 #endif
| 2546 .size sha256_block_data_order,.-sha256_block_data_order
| 2547 .asciz "SHA256 block transform for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org
>" |
| 2548 .align 2 |
| 2549 .comm OPENSSL_armcap_P,4,4 |
| 2550 |
| 2551 #endif |
OLD | NEW |