| OLD | NEW |
| 1 #if defined(__arm__) | 1 #if defined(__arm__) |
| 2 #include "arm_arch.h" | 2 #include <openssl/arm_arch.h> |
| 3 | 3 |
| 4 .text | 4 .text |
| 5 .fpu neon | 5 .fpu neon |
| 6 .code 32 | 6 .code 32 |
| 7 .globl gcm_init_v8 | 7 .globl gcm_init_v8 |
| 8 .hidden gcm_init_v8 |
| 8 .type gcm_init_v8,%function | 9 .type gcm_init_v8,%function |
| 9 .align 4 | 10 .align 4 |
| 10 gcm_init_v8: | 11 gcm_init_v8: |
| 11 vld1.64 {q9},[r1] @ load input H | 12 vld1.64 {q9},[r1] @ load input H |
| 12 vmov.i8 q11,#0xe1 | 13 vmov.i8 q11,#0xe1 |
| 13 vshl.i64 q11,q11,#57 @ 0xc2.0 | 14 vshl.i64 q11,q11,#57 @ 0xc2.0 |
| 14 vext.8 q3,q9,q9,#8 | 15 vext.8 q3,q9,q9,#8 |
| 15 vshr.u64 q10,q11,#63 | 16 vshr.u64 q10,q11,#63 |
| 16 vdup.32 q9,d18[1] | 17 vdup.32 q9,d18[1] |
| 17 vext.8 q8,q10,q11,#8 @ t0=0xc2....01 | 18 vext.8 q8,q10,q11,#8 @ t0=0xc2....01 |
| (...skipping 30 matching lines...) |
| 48 veor q14,q0,q10 | 49 veor q14,q0,q10 |
| 49 | 50 |
| 50 vext.8 q9,q14,q14,#8 @ Karatsuba pre-processing | 51 vext.8 q9,q14,q14,#8 @ Karatsuba pre-processing |
| 51 veor q9,q9,q14 | 52 veor q9,q9,q14 |
| 52 vext.8 q13,q8,q9,#8 @ pack Karatsuba pre-processed | 53 vext.8 q13,q8,q9,#8 @ pack Karatsuba pre-processed |
| 53 vst1.64 {q13,q14},[r0] @ store Htable[1..2] | 54 vst1.64 {q13,q14},[r0] @ store Htable[1..2] |
| 54 | 55 |
| 55 bx lr | 56 bx lr |
| 56 .size gcm_init_v8,.-gcm_init_v8 | 57 .size gcm_init_v8,.-gcm_init_v8 |
| 57 .globl gcm_gmult_v8 | 58 .globl gcm_gmult_v8 |
| 59 .hidden gcm_gmult_v8 |
| 58 .type gcm_gmult_v8,%function | 60 .type gcm_gmult_v8,%function |
| 59 .align 4 | 61 .align 4 |
| 60 gcm_gmult_v8: | 62 gcm_gmult_v8: |
| 61 vld1.64 {q9},[r0] @ load Xi | 63 vld1.64 {q9},[r0] @ load Xi |
| 62 vmov.i8 q11,#0xe1 | 64 vmov.i8 q11,#0xe1 |
| 63 vld1.64 {q12,q13},[r1] @ load twisted H, ... | 65 vld1.64 {q12,q13},[r1] @ load twisted H, ... |
| 64 vshl.u64 q11,q11,#57 | 66 vshl.u64 q11,q11,#57 |
| 65 #ifndef __ARMEB__ | 67 #ifndef __ARMEB__ |
| 66 vrev64.8 q9,q9 | 68 vrev64.8 q9,q9 |
| 67 #endif | 69 #endif |
| 68 vext.8 q3,q9,q9,#8 | 70 vext.8 q3,q9,q9,#8 |
| 69 | 71 |
| 70 .byte» 0x86,0x0e,0xa8,0xf2» @ pmull q0,q12,q3» » @ H.lo·Xi.lo | 72 .byte» 0x86,0x0e,0xa8,0xf2» @ pmull q0,q12,q3» » @ H.lo·Xi.lo |
| 71 veor q9,q9,q3 @ Karatsuba pre-processing | 73 veor q9,q9,q3 @ Karatsuba pre-processing |
| 72 .byte» 0x87,0x4e,0xa9,0xf2» @ pmull2 q2,q12,q3» » @ H.hi·Xi.hi | 74 .byte» 0x87,0x4e,0xa9,0xf2» @ pmull2 q2,q12,q3» » @ H.hi·Xi.hi |
| 73 .byte» 0xa2,0x2e,0xaa,0xf2» @ pmull q1,q13,q9» » @ (H.lo+H.hi)·(Xi.lo+Xi.hi) | 75 .byte» 0xa2,0x2e,0xaa,0xf2» @ pmull q1,q13,q9» » @ (H.lo+H.hi)·(Xi.lo+Xi.hi) |
| 74 | 76 |
| 75 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing | 77 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing |
| 76 veor q10,q0,q2 | 78 veor q10,q0,q2 |
| 77 veor q1,q1,q9 | 79 veor q1,q1,q9 |
| 78 veor q1,q1,q10 | 80 veor q1,q1,q10 |
| 79 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction | 81 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction |
| 80 | 82 |
| 81 vmov d4,d3 @ Xh|Xm - 256-bit result | 83 vmov d4,d3 @ Xh|Xm - 256-bit result |
| 82 vmov d3,d0 @ Xm is rotated Xl | 84 vmov d3,d0 @ Xm is rotated Xl |
| 83 veor q0,q1,q10 | 85 veor q0,q1,q10 |
| 84 | 86 |
| 85 vext.8 q10,q0,q0,#8 @ 2nd phase of reduction | 87 vext.8 q10,q0,q0,#8 @ 2nd phase of reduction |
| 86 .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 | 88 .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 |
| 87 veor q10,q10,q2 | 89 veor q10,q10,q2 |
| 88 veor q0,q0,q10 | 90 veor q0,q0,q10 |
| 89 | 91 |
| 90 #ifndef __ARMEB__ | 92 #ifndef __ARMEB__ |
| 91 vrev64.8 q0,q0 | 93 vrev64.8 q0,q0 |
| 92 #endif | 94 #endif |
| 93 vext.8 q0,q0,q0,#8 | 95 vext.8 q0,q0,q0,#8 |
| 94 vst1.64 {q0},[r0] @ write out Xi | 96 vst1.64 {q0},[r0] @ write out Xi |
| 95 | 97 |
| 96 bx lr | 98 bx lr |
| 97 .size gcm_gmult_v8,.-gcm_gmult_v8 | 99 .size gcm_gmult_v8,.-gcm_gmult_v8 |
| 98 .globl gcm_ghash_v8 | 100 .globl gcm_ghash_v8 |
| 101 .hidden gcm_ghash_v8 |
| 99 .type gcm_ghash_v8,%function | 102 .type gcm_ghash_v8,%function |
| 100 .align 4 | 103 .align 4 |
| 101 gcm_ghash_v8: | 104 gcm_ghash_v8: |
| 102 vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so | 105 vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so |
| 103 vld1.64 {q0},[r0] @ load [rotated] Xi | 106 vld1.64 {q0},[r0] @ load [rotated] Xi |
| 104 @ "[rotated]" means that | 107 @ "[rotated]" means that |
| 105 @ loaded value would have | 108 @ loaded value would have |
| 106 @ to be rotated in order to | 109 @ to be rotated in order to |
| 107 @ make it appear as in | 110 @ make it appear as in |
| 108 @ algorithm specification | 111 @ algorithm specification |
| (...skipping 19 matching lines...) |
| 128 vrev64.8 q0,q0 | 131 vrev64.8 q0,q0 |
| 129 #endif | 132 #endif |
| 130 vext.8 q3,q8,q8,#8 @ rotate I[0] | 133 vext.8 q3,q8,q8,#8 @ rotate I[0] |
| 131 blo .Lodd_tail_v8 @ r3 was less than 32 | 134 blo .Lodd_tail_v8 @ r3 was less than 32 |
| 132 vld1.64 {q9},[r2],r12 @ load [rotated] I[1] | 135 vld1.64 {q9},[r2],r12 @ load [rotated] I[1] |
| 133 #ifndef __ARMEB__ | 136 #ifndef __ARMEB__ |
| 134 vrev64.8 q9,q9 | 137 vrev64.8 q9,q9 |
| 135 #endif | 138 #endif |
| 136 vext.8 q7,q9,q9,#8 | 139 vext.8 q7,q9,q9,#8 |
| 137 veor q3,q3,q0 @ I[i]^=Xi | 140 veor q3,q3,q0 @ I[i]^=Xi |
| 138 .byte» 0x8e,0x8e,0xa8,0xf2» @ pmull q4,q12,q7» » @ H·Ii+1 | 141 .byte» 0x8e,0x8e,0xa8,0xf2» @ pmull q4,q12,q7» » @ H·Ii+1 |
| 139 veor q9,q9,q7 @ Karatsuba pre-processing | 142 veor q9,q9,q7 @ Karatsuba pre-processing |
| 140 .byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7 | 143 .byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7 |
| 141 b .Loop_mod2x_v8 | 144 b .Loop_mod2x_v8 |
| 142 | 145 |
| 143 .align 4 | 146 .align 4 |
| 144 .Loop_mod2x_v8: | 147 .Loop_mod2x_v8: |
| 145 vext.8 q10,q3,q3,#8 | 148 vext.8 q10,q3,q3,#8 |
| 146 subs r3,r3,#32 @ is there more data? | 149 subs r3,r3,#32 @ is there more data? |
| 147 .byte» 0x86,0x0e,0xac,0xf2» @ pmull q0,q14,q3» » @ H^2.lo·Xi.lo | 150 .byte» 0x86,0x0e,0xac,0xf2» @ pmull q0,q14,q3» » @ H^2.lo·Xi.lo |
| 148 movlo r12,#0 @ is it time to zero r12? | 151 movlo r12,#0 @ is it time to zero r12? |
| 149 | 152 |
| 150 .byte 0xa2,0xae,0xaa,0xf2 @ pmull q5,q13,q9 | 153 .byte 0xa2,0xae,0xaa,0xf2 @ pmull q5,q13,q9 |
| 151 veor q10,q10,q3 @ Karatsuba pre-processing | 154 veor q10,q10,q3 @ Karatsuba pre-processing |
| 152 .byte» 0x87,0x4e,0xad,0xf2» @ pmull2 q2,q14,q3» » @ H^2.hi·Xi.hi | 155 .byte» 0x87,0x4e,0xad,0xf2» @ pmull2 q2,q14,q3» » @ H^2.hi·Xi.hi |
| 153 veor q0,q0,q4 @ accumulate | 156 veor q0,q0,q4 @ accumulate |
| 154 .byte» 0xa5,0x2e,0xab,0xf2» @ pmull2 q1,q13,q10» » @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi) | 157 .byte» 0xa5,0x2e,0xab,0xf2» @ pmull2 q1,q13,q10» » @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi) |
| 155 vld1.64 {q8},[r2],r12 @ load [rotated] I[i+2] | 158 vld1.64 {q8},[r2],r12 @ load [rotated] I[i+2] |
| 156 | 159 |
| 157 veor q2,q2,q6 | 160 veor q2,q2,q6 |
| 158 moveq r12,#0 @ is it time to zero r12? | 161 moveq r12,#0 @ is it time to zero r12? |
| 159 veor q1,q1,q5 | 162 veor q1,q1,q5 |
| 160 | 163 |
| 161 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing | 164 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing |
| 162 veor q10,q0,q2 | 165 veor q10,q0,q2 |
| 163 veor q1,q1,q9 | 166 veor q1,q1,q9 |
| 164 vld1.64 {q9},[r2],r12 @ load [rotated] I[i+3] | 167 vld1.64 {q9},[r2],r12 @ load [rotated] I[i+3] |
| 165 #ifndef __ARMEB__ | 168 #ifndef __ARMEB__ |
| 166 vrev64.8 q8,q8 | 169 vrev64.8 q8,q8 |
| 167 #endif | 170 #endif |
| 168 veor q1,q1,q10 | 171 veor q1,q1,q10 |
| 169 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction | 172 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction |
| 170 | 173 |
| 171 #ifndef __ARMEB__ | 174 #ifndef __ARMEB__ |
| 172 vrev64.8 q9,q9 | 175 vrev64.8 q9,q9 |
| 173 #endif | 176 #endif |
| 174 vmov d4,d3 @ Xh|Xm - 256-bit result | 177 vmov d4,d3 @ Xh|Xm - 256-bit result |
| 175 vmov d3,d0 @ Xm is rotated Xl | 178 vmov d3,d0 @ Xm is rotated Xl |
| 176 vext.8 q7,q9,q9,#8 | 179 vext.8 q7,q9,q9,#8 |
| 177 vext.8 q3,q8,q8,#8 | 180 vext.8 q3,q8,q8,#8 |
| 178 veor q0,q1,q10 | 181 veor q0,q1,q10 |
| 179 .byte» 0x8e,0x8e,0xa8,0xf2» @ pmull q4,q12,q7» » @ H·Ii+1 | 182 .byte» 0x8e,0x8e,0xa8,0xf2» @ pmull q4,q12,q7» » @ H·Ii+1 |
| 180 veor q3,q3,q2 @ accumulate q3 early | 183 veor q3,q3,q2 @ accumulate q3 early |
| 181 | 184 |
| 182 vext.8 q10,q0,q0,#8 @ 2nd phase of reduction | 185 vext.8 q10,q0,q0,#8 @ 2nd phase of reduction |
| 183 .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 | 186 .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 |
| 184 veor q3,q3,q10 | 187 veor q3,q3,q10 |
| 185 veor q9,q9,q7 @ Karatsuba pre-processing | 188 veor q9,q9,q7 @ Karatsuba pre-processing |
| 186 veor q3,q3,q0 | 189 veor q3,q3,q0 |
| 187 .byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7 | 190 .byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7 |
| 188 bhs .Loop_mod2x_v8 @ there was at least 32 more bytes | 191 bhs .Loop_mod2x_v8 @ there was at least 32 more bytes |
| 189 | 192 |
| 190 veor q2,q2,q10 | 193 veor q2,q2,q10 |
| 191 vext.8 q3,q8,q8,#8 @ re-construct q3 | 194 vext.8 q3,q8,q8,#8 @ re-construct q3 |
| 192 adds r3,r3,#32 @ re-construct r3 | 195 adds r3,r3,#32 @ re-construct r3 |
| 193 veor q0,q0,q2 @ re-construct q0 | 196 veor q0,q0,q2 @ re-construct q0 |
| 194 beq .Ldone_v8 @ is r3 zero? | 197 beq .Ldone_v8 @ is r3 zero? |
| 195 .Lodd_tail_v8: | 198 .Lodd_tail_v8: |
| 196 vext.8 q10,q0,q0,#8 | 199 vext.8 q10,q0,q0,#8 |
| 197 veor q3,q3,q0 @ inp^=Xi | 200 veor q3,q3,q0 @ inp^=Xi |
| 198 veor q9,q8,q10 @ q9 is rotated inp^Xi | 201 veor q9,q8,q10 @ q9 is rotated inp^Xi |
| 199 | 202 |
| 200 .byte» 0x86,0x0e,0xa8,0xf2» @ pmull q0,q12,q3» » @ H.lo·Xi.lo | 203 .byte» 0x86,0x0e,0xa8,0xf2» @ pmull q0,q12,q3» » @ H.lo·Xi.lo |
| 201 veor q9,q9,q3 @ Karatsuba pre-processing | 204 veor q9,q9,q3 @ Karatsuba pre-processing |
| 202 .byte» 0x87,0x4e,0xa9,0xf2» @ pmull2 q2,q12,q3» » @ H.hi·Xi.hi | 205 .byte» 0x87,0x4e,0xa9,0xf2» @ pmull2 q2,q12,q3» » @ H.hi·Xi.hi |
| 203 .byte» 0xa2,0x2e,0xaa,0xf2» @ pmull q1,q13,q9» » @ (H.lo+H.hi)·(Xi.lo+Xi.hi) | 206 .byte» 0xa2,0x2e,0xaa,0xf2» @ pmull q1,q13,q9» » @ (H.lo+H.hi)·(Xi.lo+Xi.hi) |
| 204 | 207 |
| 205 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing | 208 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing |
| 206 veor q10,q0,q2 | 209 veor q10,q0,q2 |
| 207 veor q1,q1,q9 | 210 veor q1,q1,q9 |
| 208 veor q1,q1,q10 | 211 veor q1,q1,q10 |
| 209 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction | 212 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction |
| 210 | 213 |
| 211 vmov d4,d3 @ Xh|Xm - 256-bit result | 214 vmov d4,d3 @ Xh|Xm - 256-bit result |
| 212 vmov d3,d0 @ Xm is rotated Xl | 215 vmov d3,d0 @ Xm is rotated Xl |
| 213 veor q0,q1,q10 | 216 veor q0,q1,q10 |
| 214 | 217 |
| 215 vext.8 q10,q0,q0,#8 @ 2nd phase of reduction | 218 vext.8 q10,q0,q0,#8 @ 2nd phase of reduction |
| 216 .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 | 219 .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 |
| 217 veor q10,q10,q2 | 220 veor q10,q10,q2 |
| 218 veor q0,q0,q10 | 221 veor q0,q0,q10 |
| 219 | 222 |
| 220 .Ldone_v8: | 223 .Ldone_v8: |
| 221 #ifndef __ARMEB__ | 224 #ifndef __ARMEB__ |
| 222 vrev64.8 q0,q0 | 225 vrev64.8 q0,q0 |
| 223 #endif | 226 #endif |
| 224 vext.8 q0,q0,q0,#8 | 227 vext.8 q0,q0,q0,#8 |
| 225 vst1.64 {q0},[r0] @ write out Xi | 228 vst1.64 {q0},[r0] @ write out Xi |
| 226 | 229 |
| 227 vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so | 230 vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so |
| 228 bx lr | 231 bx lr |
| 229 .size gcm_ghash_v8,.-gcm_ghash_v8 | 232 .size gcm_ghash_v8,.-gcm_ghash_v8 |
| 230 .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 | 233 .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 |
| 231 .align 2 | 234 .align 2 |
| 232 .align 2 | 235 .align 2 |
| 233 #endif | 236 #endif |