OLD | NEW |
1 #if defined(__arm__) | 1 #if defined(__arm__) |
2 #include "arm_arch.h" | 2 #include <openssl/arm_arch.h> |
3 | 3 |
4 .text | 4 .text |
5 .fpu neon | 5 .fpu neon |
6 .code 32 | 6 .code 32 |
7 .globl gcm_init_v8 | 7 .globl gcm_init_v8 |
8 .type gcm_init_v8,%function | 8 .type gcm_init_v8,%function |
9 .align 4 | 9 .align 4 |
10 gcm_init_v8: | 10 gcm_init_v8: |
11 vld1.64 {q9},[r1] @ load input H | 11 vld1.64 {q9},[r1] @ load input H |
12 vmov.i8 q11,#0xe1 | 12 vmov.i8 q11,#0xe1 |
(...skipping 47 matching lines...)
60 gcm_gmult_v8: | 60 gcm_gmult_v8: |
61 vld1.64 {q9},[r0] @ load Xi | 61 vld1.64 {q9},[r0] @ load Xi |
62 vmov.i8 q11,#0xe1 | 62 vmov.i8 q11,#0xe1 |
63 vld1.64 {q12,q13},[r1] @ load twisted H, ... | 63 vld1.64 {q12,q13},[r1] @ load twisted H, ... |
64 vshl.u64 q11,q11,#57 | 64 vshl.u64 q11,q11,#57 |
65 #ifndef __ARMEB__ | 65 #ifndef __ARMEB__ |
66 vrev64.8 q9,q9 | 66 vrev64.8 q9,q9 |
67 #endif | 67 #endif |
68 vext.8 q3,q9,q9,#8 | 68 vext.8 q3,q9,q9,#8 |
69 | 69 |
70 .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo | 70 .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo |
71 veor q9,q9,q3 @ Karatsuba pre-processing | 71 veor q9,q9,q3 @ Karatsuba pre-processing |
72 .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi | 72 .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi |
73 .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi) | 73 .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi) |
74 | 74 |
75 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing | 75 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing |
76 veor q10,q0,q2 | 76 veor q10,q0,q2 |
77 veor q1,q1,q9 | 77 veor q1,q1,q9 |
78 veor q1,q1,q10 | 78 veor q1,q1,q10 |
79 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction | 79 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction |
80 | 80 |
81 vmov d4,d3 @ Xh|Xm - 256-bit result | 81 vmov d4,d3 @ Xh|Xm - 256-bit result |
82 vmov d3,d0 @ Xm is rotated Xl | 82 vmov d3,d0 @ Xm is rotated Xl |
83 veor q0,q1,q10 | 83 veor q0,q1,q10 |
(...skipping 44 matching lines...)
128 vrev64.8 q0,q0 | 128 vrev64.8 q0,q0 |
129 #endif | 129 #endif |
130 vext.8 q3,q8,q8,#8 @ rotate I[0] | 130 vext.8 q3,q8,q8,#8 @ rotate I[0] |
131 blo .Lodd_tail_v8 @ r3 was less than 32 | 131 blo .Lodd_tail_v8 @ r3 was less than 32 |
132 vld1.64 {q9},[r2],r12 @ load [rotated] I[1] | 132 vld1.64 {q9},[r2],r12 @ load [rotated] I[1] |
133 #ifndef __ARMEB__ | 133 #ifndef __ARMEB__ |
134 vrev64.8 q9,q9 | 134 vrev64.8 q9,q9 |
135 #endif | 135 #endif |
136 vext.8 q7,q9,q9,#8 | 136 vext.8 q7,q9,q9,#8 |
137 veor q3,q3,q0 @ I[i]^=Xi | 137 veor q3,q3,q0 @ I[i]^=Xi |
138 .byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1 | 138 .byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1 |
139 veor q9,q9,q7 @ Karatsuba pre-processing | 139 veor q9,q9,q7 @ Karatsuba pre-processing |
140 .byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7 | 140 .byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7 |
141 b .Loop_mod2x_v8 | 141 b .Loop_mod2x_v8 |
142 | 142 |
143 .align 4 | 143 .align 4 |
144 .Loop_mod2x_v8: | 144 .Loop_mod2x_v8: |
145 vext.8 q10,q3,q3,#8 | 145 vext.8 q10,q3,q3,#8 |
146 subs r3,r3,#32 @ is there more data? | 146 subs r3,r3,#32 @ is there more data? |
147 .byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo | 147 .byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo |
148 movlo r12,#0 @ is it time to zero r12? | 148 movlo r12,#0 @ is it time to zero r12? |
149 | 149 |
150 .byte 0xa2,0xae,0xaa,0xf2 @ pmull q5,q13,q9 | 150 .byte 0xa2,0xae,0xaa,0xf2 @ pmull q5,q13,q9 |
151 veor q10,q10,q3 @ Karatsuba pre-processing | 151 veor q10,q10,q3 @ Karatsuba pre-processing |
152 .byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi | 152 .byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi |
153 veor q0,q0,q4 @ accumulate | 153 veor q0,q0,q4 @ accumulate |
154 .byte 0xa5,0x2e,0xab,0xf2 @ pmull2 q1,q13,q10 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi) | 154 .byte 0xa5,0x2e,0xab,0xf2 @ pmull2 q1,q13,q10 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi) |
155 vld1.64 {q8},[r2],r12 @ load [rotated] I[i+2] | 155 vld1.64 {q8},[r2],r12 @ load [rotated] I[i+2] |
156 | 156 |
157 veor q2,q2,q6 | 157 veor q2,q2,q6 |
158 moveq r12,#0 @ is it time to zero r12? | 158 moveq r12,#0 @ is it time to zero r12? |
159 veor q1,q1,q5 | 159 veor q1,q1,q5 |
160 | 160 |
161 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing | 161 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing |
162 veor q10,q0,q2 | 162 veor q10,q0,q2 |
163 veor q1,q1,q9 | 163 veor q1,q1,q9 |
164 vld1.64 {q9},[r2],r12 @ load [rotated] I[i+3] | 164 vld1.64 {q9},[r2],r12 @ load [rotated] I[i+3] |
165 #ifndef __ARMEB__ | 165 #ifndef __ARMEB__ |
166 vrev64.8 q8,q8 | 166 vrev64.8 q8,q8 |
167 #endif | 167 #endif |
168 veor q1,q1,q10 | 168 veor q1,q1,q10 |
169 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction | 169 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction |
170 | 170 |
171 #ifndef __ARMEB__ | 171 #ifndef __ARMEB__ |
172 vrev64.8 q9,q9 | 172 vrev64.8 q9,q9 |
173 #endif | 173 #endif |
174 vmov d4,d3 @ Xh|Xm - 256-bit result | 174 vmov d4,d3 @ Xh|Xm - 256-bit result |
175 vmov d3,d0 @ Xm is rotated Xl | 175 vmov d3,d0 @ Xm is rotated Xl |
176 vext.8 q7,q9,q9,#8 | 176 vext.8 q7,q9,q9,#8 |
177 vext.8 q3,q8,q8,#8 | 177 vext.8 q3,q8,q8,#8 |
178 veor q0,q1,q10 | 178 veor q0,q1,q10 |
179 .byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1 | 179 .byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1 |
180 veor q3,q3,q2 @ accumulate q3 early | 180 veor q3,q3,q2 @ accumulate q3 early |
181 | 181 |
182 vext.8 q10,q0,q0,#8 @ 2nd phase of reduction | 182 vext.8 q10,q0,q0,#8 @ 2nd phase of reduction |
183 .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 | 183 .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 |
184 veor q3,q3,q10 | 184 veor q3,q3,q10 |
185 veor q9,q9,q7 @ Karatsuba pre-processing | 185 veor q9,q9,q7 @ Karatsuba pre-processing |
186 veor q3,q3,q0 | 186 veor q3,q3,q0 |
187 .byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7 | 187 .byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7 |
188 bhs .Loop_mod2x_v8 @ there was at least 32 more bytes | 188 bhs .Loop_mod2x_v8 @ there was at least 32 more bytes |
189 | 189 |
190 veor q2,q2,q10 | 190 veor q2,q2,q10 |
191 vext.8 q3,q8,q8,#8 @ re-construct q3 | 191 vext.8 q3,q8,q8,#8 @ re-construct q3 |
192 adds r3,r3,#32 @ re-construct r3 | 192 adds r3,r3,#32 @ re-construct r3 |
193 veor q0,q0,q2 @ re-construct q0 | 193 veor q0,q0,q2 @ re-construct q0 |
194 beq .Ldone_v8 @ is r3 zero? | 194 beq .Ldone_v8 @ is r3 zero? |
195 .Lodd_tail_v8: | 195 .Lodd_tail_v8: |
196 vext.8 q10,q0,q0,#8 | 196 vext.8 q10,q0,q0,#8 |
197 veor q3,q3,q0 @ inp^=Xi | 197 veor q3,q3,q0 @ inp^=Xi |
198 veor q9,q8,q10 @ q9 is rotated inp^Xi | 198 veor q9,q8,q10 @ q9 is rotated inp^Xi |
199 | 199 |
200 .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo | 200 .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo |
201 veor q9,q9,q3 @ Karatsuba pre-processing | 201 veor q9,q9,q3 @ Karatsuba pre-processing |
202 .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi | 202 .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi |
203 .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi) | 203 .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi) |
204 | 204 |
205 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing | 205 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing |
206 veor q10,q0,q2 | 206 veor q10,q0,q2 |
207 veor q1,q1,q9 | 207 veor q1,q1,q9 |
208 veor q1,q1,q10 | 208 veor q1,q1,q10 |
209 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction | 209 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction |
210 | 210 |
211 vmov d4,d3 @ Xh|Xm - 256-bit result | 211 vmov d4,d3 @ Xh|Xm - 256-bit result |
212 vmov d3,d0 @ Xm is rotated Xl | 212 vmov d3,d0 @ Xm is rotated Xl |
213 veor q0,q1,q10 | 213 veor q0,q1,q10 |
(...skipping 10 matching lines...)
224 vext.8 q0,q0,q0,#8 | 224 vext.8 q0,q0,q0,#8 |
225 vst1.64 {q0},[r0] @ write out Xi | 225 vst1.64 {q0},[r0] @ write out Xi |
226 | 226 |
227 vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so | 227 vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so |
228 bx lr | 228 bx lr |
229 .size gcm_ghash_v8,.-gcm_ghash_v8 | 229 .size gcm_ghash_v8,.-gcm_ghash_v8 |
230 .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 | 230 .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 |
231 .align 2 | 231 .align 2 |
232 .align 2 | 232 .align 2 |
233 #endif | 233 #endif |
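For reference only (not part of this change): the PMULL/PMULL2 code above computes the same GF(2^128) multiplication that the generic GHASH path performs bit by bit, just via Karatsuba on 64x64 carry-less products and a two-phase reduction by the constant 0xe1 loaded into q11. A minimal portable C sketch of the reference multiplication, following NIST SP 800-38D; the names u128 and gf128_mul are illustrative, not from this CL:

#include <stdint.h>

/* One 16-byte GHASH block; hi holds the first eight bytes (big-endian). */
typedef struct { uint64_t hi, lo; } u128;

/* X = X * H in GF(2^128), bit-reflected GHASH convention. */
void gf128_mul(u128 *X, const u128 *H) {
    u128 Z = {0, 0};
    u128 V = *H;
    for (int i = 0; i < 128; i++) {
        /* bit i of X, counting from the most significant bit of byte 0 */
        uint64_t bit = (i < 64) ? (X->hi >> (63 - i)) & 1
                                : (X->lo >> (127 - i)) & 1;
        if (bit) {
            Z.hi ^= V.hi;
            Z.lo ^= V.lo;
        }
        /* V >>= 1; if a bit falls off, reduce by x^128+x^7+x^2+x+1 (0xe1...) */
        uint64_t lsb = V.lo & 1;
        V.lo = (V.lo >> 1) | (V.hi << 63);
        V.hi >>= 1;
        if (lsb)
            V.hi ^= 0xe100000000000000ULL;
    }
    *X = Z;
}

Processing one input block then mirrors the "I[i]^=Xi" accumulation in the assembly: XOR the block into Xi, then call gf128_mul(&Xi, &H).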