#!/usr/bin/perl -w
#
# MD5 optimized for AMD64.
#
# Author: Marc Bevand <bevand_m (at) epita.fr>
# Licence: I hereby disclaim the copyright on this code and place it
# in the public domain.
#

use strict;

# Accumulator for the generated assembly source; each round*_step()
# call appends to it and the whole thing is printed at the end.
my $code;
# round1_step() does:
#   dst = x + ((dst + F(x,y,z) + X[k] + T_i) <<< s)
# where F(x,y,z) = (x & y) | (~x & z), computed here as z ^ (x & (y ^ z)).
# Interleaved with the step proper it also prefetches for the NEXT step:
#   %r10d = X[k_next]
#   %r11d = z' (copy of z for the next step)
# Each round1_step() takes about 5.3 clocks (9 instructions, 1.7 IPC)
#
# $pos: -1 = first step of the round (emit the initial prefetches),
#        0 = middle step, 1 = last step.
sub round1_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    $code .= "	mov	0*4(%rsi), %r10d	/* (NEXT STEP) X[0] */\n" if ($pos == -1);
    $code .= "	mov	%edx, %r11d		/* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
    $code .= <<EOF;
	xor	$y, %r11d			/* y ^ ... */
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
	and	$x, %r11d			/* x & ... */
	xor	$z, %r11d			/* z ^ ... */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	add	%r11d, $dst			/* dst += ... */
	rol	\$$s, $dst			/* dst <<< s */
	mov	$y, %r11d			/* (NEXT STEP) z' = $y */
	add	$x, $dst			/* dst += x */
EOF
}
# round2_step() does:
#   dst = x + ((dst + G(x,y,z) + X[k] + T_i) <<< s)
# where G(x,y,z) = (x & z) | (y & ~z).
# Interleaved with the step proper it also prefetches for the NEXT step:
#   %r10d = X[k_next]
#   %r11d = z' (copy of z for the next step)
#   %r12d = z' (copy of z for the next step)
# Each round2_step() takes about 5.4 clocks (11 instructions, 2.0 IPC)
#
# $pos: -1 = first step of the round (emit the initial prefetches),
#        0 = middle step, 1 = last step.
sub round2_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    $code .= "	mov	1*4(%rsi), %r10d	/* (NEXT STEP) X[1] */\n" if ($pos == -1);
    $code .= "	mov	%edx, %r11d		/* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
    $code .= "	mov	%edx, %r12d		/* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
    $code .= <<EOF;
	not	%r11d				/* not z */
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
	and	$x, %r12d			/* x & z */
	and	$y, %r11d			/* y & (not z) */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	or	%r11d, %r12d			/* (y & (not z)) | (x & z) */
	mov	$y, %r11d			/* (NEXT STEP) z' = $y */
	add	%r12d, $dst			/* dst += ... */
	mov	$y, %r12d			/* (NEXT STEP) z' = $y */
	rol	\$$s, $dst			/* dst <<< s */
	add	$x, $dst			/* dst += x */
EOF
}
# round3_step() does:
#   dst = x + ((dst + H(x,y,z) + X[k] + T_i) <<< s)
# where H(x,y,z) = x ^ y ^ z.
# Interleaved with the step proper it also prefetches for the NEXT step:
#   %r10d = X[k_next]
#   %r11d = y' (copy of y for the next step)
# Each round3_step() takes about 4.2 clocks (8 instructions, 1.9 IPC)
#
# $pos: -1 = first step of the round (emit the initial prefetches),
#        0 = middle step, 1 = last step.
sub round3_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    $code .= "	mov	5*4(%rsi), %r10d	/* (NEXT STEP) X[5] */\n" if ($pos == -1);
    $code .= "	mov	%ecx, %r11d		/* (NEXT STEP) y' = %ecx */\n" if ($pos == -1);
    $code .= <<EOF;
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	xor	$z, %r11d			/* z ^ ... */
	xor	$x, %r11d			/* x ^ ... */
	add	%r11d, $dst			/* dst += ... */
	rol	\$$s, $dst			/* dst <<< s */
	mov	$x, %r11d			/* (NEXT STEP) y' = $x */
	add	$x, $dst			/* dst += x */
EOF
}
# round4_step() does:
#   dst = x + ((dst + I(x,y,z) + X[k] + T_i) <<< s)
# where I(x,y,z) = y ^ (x | ~z).
# Interleaved with the step proper it also prefetches for the NEXT step:
#   %r10d = X[k_next]
#   %r11d = not z' (copy of not z for the next step)
# Each round4_step() takes about 5.2 clocks (9 instructions, 1.7 IPC)
#
# $pos: -1 = first step of the round (emit the initial prefetches and
#            the 0xffffffff mask used to build "not z"),
#        0 = middle step, 1 = last step.
sub round4_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    $code .= "	mov	0*4(%rsi), %r10d	/* (NEXT STEP) X[0] */\n" if ($pos == -1);
    $code .= "	mov	\$0xffffffff, %r11d\n" if ($pos == -1);
    $code .= "	xor	%edx, %r11d		/* (NEXT STEP) not z' = not %edx */\n"
        if ($pos == -1);
    $code .= <<EOF;
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
	or	$x, %r11d			/* x | ... */
	xor	$y, %r11d			/* y ^ ... */
	add	%r11d, $dst			/* dst += ... */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	mov	\$0xffffffff, %r11d
	rol	\$$s, $dst			/* dst <<< s */
	xor	$y, %r11d			/* (NEXT STEP) not z' = not $y */
	add	$x, $dst			/* dst += x */
EOF
}
# Command line: [flavour] output-file.  If the first argument contains a
# dot it is actually the output file name and no flavour was given.
my $flavour = shift;
my $output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

my $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

# Locate the perlasm translator relative to this script's directory.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; my $dir=$1; my $xlate;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

# $flavour/$output may be undef when only one argument was given.
no warnings qw(uninitialized);
# Pipe everything we print through the translator; quote the paths so
# directories containing spaces survive, and check that the pipe opened.
open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\""
    or die "can't call $xlate: $!";
*STDOUT=*OUT;
# Function prologue: save callee-saved registers, load the MD5 state
# words A..D from the context and compute the end-of-data pointer.
$code .= <<EOF;
.text
.align 16

.globl md5_block_asm_data_order
.type md5_block_asm_data_order,\@function,3
md5_block_asm_data_order:
	push	%rbp
	push	%rbx
	push	%r12
	push	%r14
	push	%r15
.Lprologue:

	# rdi = arg #1 (ctx, MD5_CTX pointer)
	# rsi = arg #2 (ptr, data pointer)
	# rdx = arg #3 (nbr, number of 16-word blocks to process)
	mov	%rdi,		%rbp	# rbp = ctx
	shl	\$6,		%rdx	# rdx = nbr in bytes
	lea	(%rsi,%rdx),	%rdi	# rdi = end
	mov	0*4(%rbp),	%eax	# eax = ctx->A
	mov	1*4(%rbp),	%ebx	# ebx = ctx->B
	mov	2*4(%rbp),	%ecx	# ecx = ctx->C
	mov	3*4(%rbp),	%edx	# edx = ctx->D
	# end is 'rdi'
	# ptr is 'rsi'
	# A is 'eax'
	# B is 'ebx'
	# C is 'ecx'
	# D is 'edx'

	cmp	%rdi,		%rsi	# cmp end with ptr
	je	.Lend			# jmp if ptr == end

	# BEGIN of loop over 16-word blocks
.Lloop:	# save old values of A, B, C, D
	mov	%eax,		%r8d
	mov	%ebx,		%r9d
	mov	%ecx,		%r14d
	mov	%edx,		%r15d
EOF
# The 64 MD5 steps.  Register rotation (dst cycles eax->edx->ecx->ebx),
# message-word index k and shift amount s follow RFC 1321; T_i are the
# floor(2^32 * abs(sin(i))) constants.
round1_step(-1,'%eax','%ebx','%ecx','%edx', '1','0xd76aa478', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xe8c7b756','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx', '3','0x242070db','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax', '4','0xc1bdceee','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx', '5','0xf57c0faf', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx', '6','0x4787c62a','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx', '7','0xa8304613','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax', '8','0xfd469501','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx', '9','0x698098d8', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8b44f7af','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx','11','0xffff5bb1','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax','12','0x895cd7be','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx','13','0x6b901122', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx','14','0xfd987193','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx','15','0xa679438e','17');
round1_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x49b40821','22');

round2_step(-1,'%eax','%ebx','%ecx','%edx', '6','0xf61e2562', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx','11','0xc040b340', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '0','0x265e5a51','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax', '5','0xe9b6c7aa','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx','10','0xd62f105d', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx','15', '0x2441453', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '4','0xd8a1e681','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax', '9','0xe7d3fbc8','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx','14','0x21e1cde6', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xc33707d6', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '8','0xf4d50d87','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax','13','0x455a14ed','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx', '2','0xa9e3e905', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx', '7','0xfcefa3f8', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx','12','0x676f02d9','14');
round2_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x8d2a4c8a','20');

round3_step(-1,'%eax','%ebx','%ecx','%edx', '8','0xfffa3942', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx','11','0x8771f681','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx','14','0x6d9d6122','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax', '1','0xfde5380c','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx', '4','0xa4beea44', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx', '7','0x4bdecfa9','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx','10','0xf6bb4b60','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax','13','0xbebfbc70','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx', '0','0x289b7ec6', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xeaa127fa','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx', '6','0xd4ef3085','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax', '9', '0x4881d05','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx','12','0xd9d4d039', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx','15','0xe6db99e5','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx', '2','0x1fa27cf8','16');
round3_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xc4ac5665','23');

round4_step(-1,'%eax','%ebx','%ecx','%edx', '7','0xf4292244', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx','14','0x432aff97','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '5','0xab9423a7','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax','12','0xfc93a039','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx', '3','0x655b59c3', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8f0ccc92','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '1','0xffeff47d','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax', '8','0x85845dd1','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx','15','0x6fa87e4f', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx', '6','0xfe2ce6e0','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx','13','0xa3014314','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax', '4','0x4e0811a1','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx','11','0xf7537e82', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xbd3af235','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '9','0x2ad7d2bb','15');
round4_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xeb86d391','21');
# Function epilogue: fold the saved state back in, advance the data
# pointer, loop while blocks remain, then store the state and restore
# the five callee-saved registers pushed in the prologue (5*8 = 40 bytes).
$code .= <<EOF;
	# add old values of A, B, C, D
	add	%r8d,	%eax
	add	%r9d,	%ebx
	add	%r14d,	%ecx
	add	%r15d,	%edx

	# loop control
	add	\$64,	%rsi		# ptr += 64
	cmp	%rdi,	%rsi		# cmp end with ptr
	jb	.Lloop			# jmp if ptr < end
	# END of loop over 16-word blocks

.Lend:
	mov	%eax,		0*4(%rbp)	# ctx->A = A
	mov	%ebx,		1*4(%rbp)	# ctx->B = B
	mov	%ecx,		2*4(%rbp)	# ctx->C = C
	mov	%edx,		3*4(%rbp)	# ctx->D = D

	mov	(%rsp),%r15
	mov	8(%rsp),%r14
	mov	16(%rsp),%r12
	mov	24(%rsp),%rbx
	mov	32(%rsp),%rbp
	add	\$40,%rsp
.Lepilogue:
	ret
.size md5_block_asm_data_order,.-md5_block_asm_data_order
EOF
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
# Emitted only for Win64 targets: unwinds the 40-byte prologue frame and
# hands off to RtlVirtualUnwind so SEH can walk past this function.
if ($win64) {
my $rec="%rcx";
my $frame="%rdx";
my $context="%r8";
my $disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	lea	.Lprologue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lin_prologue

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lepilogue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lin_prologue

	lea	40(%rax),%rax		# skip the 5 saved registers

	mov	-8(%rax),%rbp
	mov	-16(%rax),%rbx
	mov	-24(%rax),%r12
	mov	-32(%rax),%r14
	mov	-40(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lin_prologue:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_md5_block_asm_data_order
	.rva	.LSEH_end_md5_block_asm_data_order
	.rva	.LSEH_info_md5_block_asm_data_order

.section	.xdata
.align	8
.LSEH_info_md5_block_asm_data_order:
	.byte	9,0,0,0
	.rva	se_handler
___
}
print $code;

# STDOUT is a pipe into the xlate translator; a failed close is the
# only place buffered-write or child-exit errors surface, so check it.
close STDOUT or die "error closing STDOUT: $!";