OLD | NEW |
| (Empty) |
1 #!/usr/bin/env perl | |
2 # | |
3 # ==================================================================== | |
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL | |
5 # project. The module is, however, dual licensed under OpenSSL and | |
6 # CRYPTOGAMS licenses depending on where you obtain it. For further | |
7 # details see http://www.openssl.org/~appro/cryptogams/. | |
8 # ==================================================================== | |
9 # | |
10 # May 2011 | |
11 # | |
12 # The module implements bn_GF2m_mul_2x2 polynomial multiplication used | |
13 # in bn_gf2m.c. It's kind of low-hanging mechanical port from C for | |
14 # the time being... Except that it has two code paths: code suitable | |
15 # for any x86_64 CPU and PCLMULQDQ one suitable for Westmere and | |
16 # later. Improvement varies from one benchmark and µ-arch to another. | |
17 # Vanilla code path is at most 20% faster than compiler-generated code | |
18 # [not very impressive], while PCLMULQDQ - whole 85%-160% better on | |
19 # 163- and 571-bit ECDH benchmarks on Intel CPUs. Keep in mind that | |
20 # these coefficients are not ones for bn_GF2m_mul_2x2 itself, as not | |
21 # all CPU time is burnt in it... | |
22 | |
# Command-line handling shared by all perlasm scripts: the flavour
# (elf, macosx, mingw64, nasm, ...) selects the assembler dialect and
# the remaining argument names the output file; both are forwarded to
# the x86_64-xlate.pl translator.
$flavour = shift;
$output = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

# Win64 needs a different calling convention and SEH unwind data;
# detect it from either the flavour or the output file extension.
$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

# Locate the perlasm translator relative to this script's directory.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

# Pipe everything we print through the translator.  Check the open:
# an unchecked failure here would silently produce no assembly at all.
open STDOUT,"| \"$^X\" $xlate $flavour $output"
    or die "can't call $xlate: $!";
35 | |
# Register allocation for the generated code.  These Perl scalars are
# interpolated into the assembly templates below, so the names chosen
# here fix the register usage of _mul_1x1.
$lo   = "%rax";		# low half of the 128-bit product
$hi   = "%rdx";		# high half of the 128-bit product
$a    = $lo;		# first input operand (deliberately aliases $lo)
$i0   = "%rsi";		# scratch / table index
$i1   = "%rdi";		# scratch / table index
$t0   = "%rbx";		# temporary
$t1   = "%rcx";		# temporary
$b    = "%rbp";		# second input operand (consumed nibble by nibble)
$mask = "%r8";		# nibble mask, loaded with 0xf by the caller
# Pre-computed table ingredients: multiples of the masked operand and
# two frequently needed XOR combinations (see the tab[] stores below).
($a1,$a2,$a4,$a8,$a12,$a48) = ("%r9","%r10","%r11","%r12","%r13","%r14");
# SSE2 registers carrying the other half of the table-lookup work.
($R,$Tx) = ("%xmm0","%xmm1");
42 | |
# _mul_1x1: generate code for a 64x64->128-bit carry-less (binary
# polynomial) multiplication of $a by $b, result in $hi:$lo.
#
# Approach visible below: mask $a down to its low 61 bits (a1), build a
# 16-entry stack table tab[i] = (a1 * i) in GF(2)[x] for i = 0..15 from
# the shifted copies a1,a2=2*a1,a4=4*a1,a8=8*a1, then consume $b four
# bits (one nibble) at a time, accumulating shifted table entries.
# Half of the lookups are folded in SSE2 ($R/$Tx) to relieve port
# pressure.  The three masked-off top bits of $a are handled up front
# by the sar/and/shl sequence: each `sar \$63` broadcasts one of bits
# 63/62/61 into a full-register mask that conditionally adds the
# correspondingly shifted $b into the product.
# NOTE(review): "boardcast" in the emitted text is a typo for
# "broadcast"; left untouched since it is part of the generated output.
$code.=<<___;
.text

.type	_mul_1x1,\@abi-omnipotent
.align	16
_mul_1x1:
	sub	\$128+8,%rsp
	mov	\$-1,$a1
	lea	($a,$a),$i0
	shr	\$3,$a1
	lea	(,$a,4),$i1
	and	$a,$a1			# a1=a&0x1fffffffffffffff
	lea	(,$a,8),$a8
	sar	\$63,$a			# broadcast 63rd bit
	lea	($a1,$a1),$a2
	sar	\$63,$i0		# broadcast 62nd bit
	lea	(,$a1,4),$a4
	and	$b,$a
	sar	\$63,$i1		# boardcast 61st bit
	mov	$a,$hi			# $a is $lo
	shl	\$63,$lo
	and	$b,$i0
	shr	\$1,$hi
	mov	$i0,$t1
	shl	\$62,$i0
	and	$b,$i1
	shr	\$2,$t1
	xor	$i0,$lo
	mov	$i1,$t0
	shl	\$61,$i1
	xor	$t1,$hi
	shr	\$3,$t0
	xor	$i1,$lo
	xor	$t0,$hi

	mov	$a1,$a12
	movq	\$0,0(%rsp)		# tab[0]=0
	xor	$a2,$a12		# a1^a2
	mov	$a1,8(%rsp)		# tab[1]=a1
	mov	$a4,$a48
	mov	$a2,16(%rsp)		# tab[2]=a2
	xor	$a8,$a48		# a4^a8
	mov	$a12,24(%rsp)		# tab[3]=a1^a2

	xor	$a4,$a1
	mov	$a4,32(%rsp)		# tab[4]=a4
	xor	$a4,$a2
	mov	$a1,40(%rsp)		# tab[5]=a1^a4
	xor	$a4,$a12
	mov	$a2,48(%rsp)		# tab[6]=a2^a4
	xor	$a48,$a1		# a1^a4^a4^a8=a1^a8
	mov	$a12,56(%rsp)		# tab[7]=a1^a2^a4
	xor	$a48,$a2		# a2^a4^a4^a8=a1^a8

	mov	$a8,64(%rsp)		# tab[8]=a8
	xor	$a48,$a12		# a1^a2^a4^a4^a8=a1^a2^a8
	mov	$a1,72(%rsp)		# tab[9]=a1^a8
	xor	$a4,$a1			# a1^a8^a4
	mov	$a2,80(%rsp)		# tab[10]=a2^a8
	xor	$a4,$a2			# a2^a8^a4
	mov	$a12,88(%rsp)		# tab[11]=a1^a2^a8

	xor	$a4,$a12		# a1^a2^a8^a4
	mov	$a48,96(%rsp)		# tab[12]=a4^a8
	mov	$mask,$i0
	mov	$a1,104(%rsp)		# tab[13]=a1^a4^a8
	and	$b,$i0
	mov	$a2,112(%rsp)		# tab[14]=a2^a4^a8
	shr	\$4,$b
	mov	$a12,120(%rsp)		# tab[15]=a1^a2^a4^a8
	mov	$mask,$i1
	and	$b,$i1
	shr	\$4,$b

	movq	(%rsp,$i0,8),$R		# half of calculations is done in SSE2
	mov	$mask,$i0
	and	$b,$i0
	shr	\$4,$b
___
# Unroll nibbles 1..7 of $b.  For nibble $n the looked-up table entry
# must land at bit offset 4*$n; the GPR half splits it as
# (t1 << (8*$n-4)) into $lo and (t0 >> (64-(8*$n-4))) into $hi, while
# the SSE2 half uses a byte-granular pslldq \$$n (4*(2*$n)/8 = $n
# bytes, since SSE2 handles every other nibble).  Backtick spans are
# constant-folded by the eval substitution at the bottom of the file.
for ($n=1;$n<8;$n++) {
$code.=<<___;
	mov	(%rsp,$i1,8),$t1
	mov	$mask,$i1
	mov	$t1,$t0
	shl	\$`8*$n-4`,$t1
	and	$b,$i1
	movq	(%rsp,$i0,8),$Tx
	shr	\$`64-(8*$n-4)`,$t0
	xor	$t1,$lo
	pslldq	\$$n,$Tx
	mov	$mask,$i0
	shr	\$4,$b
	xor	$t0,$hi
	and	$b,$i0
	shr	\$4,$b
	pxor	$Tx,$R
___
}
# Final (16th) nibble: $n is 8 after the loop, so the shift pair below
# becomes shl \$60 / shr \$4.  Then the SSE2 accumulator $R is folded
# back into the GPR result via two movq extractions.
$code.=<<___;
	mov	(%rsp,$i1,8),$t1
	mov	$t1,$t0
	shl	\$`8*$n-4`,$t1
	movq	$R,$i0
	shr	\$`64-(8*$n-4)`,$t0
	xor	$t1,$lo
	psrldq	\$8,$R
	xor	$t0,$hi
	movq	$R,$i1
	xor	$i0,$lo
	xor	$i1,$hi

	add	\$128+8,%rsp
	ret
.Lend_mul_1x1:
.size	_mul_1x1,.-_mul_1x1
___
159 | |
# Argument registers of bn_GF2m_mul_2x2(r, a1, a0, b1, b0) in the
# platform calling convention.  NOTE: $a1 (and friends) deliberately
# reuse names from the register map above; their previous values have
# already been interpolated into $code, so the rebinding is safe here.
($rp,$a1,$a0,$b1,$b0) = $win64?	("%rcx","%rdx","%r8", "%r9","%r10") :	# Win64 order
				("%rdi","%rsi","%rdx","%rcx","%r8");	# Unix order
# bn_GF2m_mul_2x2: 2x2 -> 4 limb binary-polynomial multiplication.
# Two code paths are emitted: a PCLMULQDQ path taken when bit 33 of
# OPENSSL_ia32cap_P is set, and a generic path built on three
# _mul_1x1 calls combined Karatsuba-style.
$code.=<<___;
.extern	OPENSSL_ia32cap_P
.globl	bn_GF2m_mul_2x2
.type	bn_GF2m_mul_2x2,\@abi-omnipotent
.align	16
bn_GF2m_mul_2x2:
	mov	OPENSSL_ia32cap_P(%rip),%rax
	bt	\$33,%rax
	jnc	.Lvanilla_mul_2x2

	movq	$a1,%xmm0
	movq	$b1,%xmm1
	movq	$a0,%xmm2
___
# On Win64 the 5th argument ($b0) arrives on the stack, not in a
# register, hence the two variants of loading %xmm3.
$code.=<<___ if ($win64);
	movq	40(%rsp),%xmm3
___
$code.=<<___ if (!$win64);
	movq	$b0,%xmm3
___
# Karatsuba with three carry-less multiplies; in GF(2) addition and
# subtraction are both XOR, so the middle term is
# (a0+a1)·(b0+b1) ^ a0·b0 ^ a1·b1, split across the two result halves.
$code.=<<___;
	movdqa	%xmm0,%xmm4
	movdqa	%xmm1,%xmm5
	pclmulqdq	\$0,%xmm1,%xmm0	# a1·b1
	pxor	%xmm2,%xmm4
	pxor	%xmm3,%xmm5
	pclmulqdq	\$0,%xmm3,%xmm2	# a0·b0
	pclmulqdq	\$0,%xmm5,%xmm4	# (a0+a1)·(b0+b1)
	xorps	%xmm0,%xmm4
	xorps	%xmm2,%xmm4		# (a0+a1)·(b0+b1)-a0·b0-a1·b1
	movdqa	%xmm4,%xmm5
	pslldq	\$8,%xmm4
	psrldq	\$8,%xmm5
	pxor	%xmm4,%xmm2
	pxor	%xmm5,%xmm0
	movdqu	%xmm2,0($rp)
	movdqu	%xmm0,16($rp)
	ret

.align	16
.Lvanilla_mul_2x2:
	lea	-8*17(%rsp),%rsp
___
# Vanilla-path prologue: fetch stack-passed $b0 on Win64 and preserve
# the non-volatile registers (%rdi/%rsi are callee-saved on Win64).
$code.=<<___ if ($win64);
	mov	`8*17+40`(%rsp),$b0
	mov	%rdi,8*15(%rsp)
	mov	%rsi,8*16(%rsp)
___
$code.=<<___;
	mov	%r14,8*10(%rsp)
	mov	%r13,8*11(%rsp)
	mov	%r12,8*12(%rsp)
	mov	%rbp,8*13(%rsp)
	mov	%rbx,8*14(%rsp)
.Lbody_mul_2x2:
	mov	$rp,32(%rsp)		# save the arguments
	mov	$a1,40(%rsp)
	mov	$a0,48(%rsp)
	mov	$b1,56(%rsp)
	mov	$b0,64(%rsp)

	mov	\$0xf,$mask
	mov	$a1,$a
	mov	$b1,$b
	call	_mul_1x1		# a1·b1
	mov	$lo,16(%rsp)
	mov	$hi,24(%rsp)

	mov	48(%rsp),$a
	mov	64(%rsp),$b
	call	_mul_1x1		# a0·b0
	mov	$lo,0(%rsp)
	mov	$hi,8(%rsp)

	mov	40(%rsp),$a
	mov	56(%rsp),$b
	xor	48(%rsp),$a
	xor	64(%rsp),$b
	call	_mul_1x1		# (a0+a1)·(b0+b1)
___
	@r=("%rbx","%rcx","%rdi","%rsi");
# Combine the three partial products: @r holds a0·b0 (lo,hi) and a1·b1
# (lo,hi) reloaded from the stack; $hi:$lo still holds the middle
# product.  The XOR chain forms the standard Karatsuba recombination
# and stores the 4-limb result at the saved result pointer (%rbp).
$code.=<<___;
	mov	0(%rsp),@r[0]
	mov	8(%rsp),@r[1]
	mov	16(%rsp),@r[2]
	mov	24(%rsp),@r[3]
	mov	32(%rsp),%rbp

	xor	$hi,$lo
	xor	@r[1],$hi
	xor	@r[0],$lo
	mov	@r[0],0(%rbp)
	xor	@r[2],$hi
	mov	@r[3],24(%rbp)
	xor	@r[3],$lo
	xor	@r[3],$hi
	xor	$hi,$lo
	mov	$hi,16(%rbp)
	mov	$lo,8(%rbp)

	mov	8*10(%rsp),%r14
	mov	8*11(%rsp),%r13
	mov	8*12(%rsp),%r12
	mov	8*13(%rsp),%rbp
	mov	8*14(%rsp),%rbx
___
$code.=<<___ if ($win64);
	mov	8*15(%rsp),%rdi
	mov	8*16(%rsp),%rsi
___
$code.=<<___;
	lea	8*17(%rsp),%rsp
	ret
.Lend_mul_2x2:
.size	bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
.asciz	"GF(2^m) Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	16
___
281 | |
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
#
# Win64 structured-exception-handling support: a custom unwind handler
# for the vanilla bn_GF2m_mul_2x2 path (which has a non-trivial frame),
# plus .pdata/.xdata tables registering both functions.  _mul_1x1 uses
# compact unwind codes only; se_handler covers .Lvanilla_mul_2x2.
if ($win64) {
# Perl aliases for the handler's four argument registers (Win64 ABI).
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind

.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	152($context),%rax	# pull context->Rsp
	mov	248($context),%rbx	# pull context->Rip

	lea	.Lbody_mul_2x2(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<"prologue" label
	jb	.Lin_prologue

	mov	8*10(%rax),%r14		# mimic epilogue
	mov	8*11(%rax),%r13
	mov	8*12(%rax),%r12
	mov	8*13(%rax),%rbp
	mov	8*14(%rax),%rbx
	mov	8*15(%rax),%rdi
	mov	8*16(%rax),%rsi

	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14

.Lin_prologue:
	lea	8*17(%rax),%rax
	mov	%rax,152($context)	# restore context->Rsp

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler

.section	.pdata
.align	4
	.rva	_mul_1x1
	.rva	.Lend_mul_1x1
	.rva	.LSEH_info_1x1

	.rva	.Lvanilla_mul_2x2
	.rva	.Lend_mul_2x2
	.rva	.LSEH_info_2x2
.section	.xdata
.align	8
.LSEH_info_1x1:
	.byte	0x01,0x07,0x02,0x00
	.byte	0x07,0x01,0x11,0x00	# sub rsp,128+8
.LSEH_info_2x2:
	.byte	9,0,0,0
	.rva	se_handler
___
}
386 | |
# Post-process the accumulated assembly: backtick spans hold Perl
# constant expressions (e.g. the shift counts computed from $n in the
# _mul_1x1 unroll) and are replaced by their evaluated values here.
$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

# STDOUT is a pipe to x86_64-xlate.pl; buffered-write and translator
# failures only surface at close, so check it instead of exiting
# successfully with truncated output.
close STDOUT or die "error closing STDOUT: $!";
OLD | NEW |