OLD | NEW |
1 /* crypto/md32_common.h */ | 1 /* crypto/md32_common.h */ |
2 /* ==================================================================== | 2 /* ==================================================================== |
3 * Copyright (c) 1999-2007 The OpenSSL Project. All rights reserved. | 3 * Copyright (c) 1999-2007 The OpenSSL Project. All rights reserved. |
4 * | 4 * |
5 * Redistribution and use in source and binary forms, with or without | 5 * Redistribution and use in source and binary forms, with or without |
6 * modification, are permitted provided that the following conditions | 6 * modification, are permitted provided that the following conditions |
7 * are met: | 7 * are met: |
8 * | 8 * |
9 * 1. Redistributions of source code must retain the above copyright | 9 * 1. Redistributions of source code must retain the above copyright |
10 * notice, this list of conditions and the following disclaimer. | 10 * notice, this list of conditions and the following disclaimer. |
(...skipping 147 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
158 * Some GNU C inline assembler templates. Note that these are | 158 * Some GNU C inline assembler templates. Note that these are |
159 * rotates by *constant* number of bits! But that's exactly | 159 * rotates by *constant* number of bits! But that's exactly |
160 * what we need here... | 160 * what we need here... |
161 * <appro@fy.chalmers.se> | 161 * <appro@fy.chalmers.se> |
162 */ | 162 */ |
163 # if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__) | 163 # if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__) |
164 # define ROTATE(a,n) ({ register unsigned int ret; \ | 164 # define ROTATE(a,n) ({ register unsigned int ret; \ |
165 asm ( \ | 165 asm ( \ |
166 "roll %1,%0" \ | 166 "roll %1,%0" \ |
167 : "=r"(ret) \ | 167 : "=r"(ret) \ |
168 » » » » : "I"(n), "0"(a)» \ | 168 » » » » : "I"(n), "0"((unsigned int)(a))» \ |
169 : "cc"); \ | 169 : "cc"); \ |
170 ret; \ | 170 ret; \ |
171 }) | 171 }) |
172 # elif defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \ | 172 # elif defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \ |
173 defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__) | 173 defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__) |
174 # define ROTATE(a,n) ({ register unsigned int ret; \ | 174 # define ROTATE(a,n) ({ register unsigned int ret; \ |
175 asm ( \ | 175 asm ( \ |
176 "rlwinm %0,%1,%2,0,31" \ | 176 "rlwinm %0,%1,%2,0,31" \ |
177 : "=r"(ret) \ | 177 : "=r"(ret) \ |
178 : "r"(a), "I"(n)); \ | 178 : "r"(a), "I"(n)); \ |
(...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
286 HASH_LONG l; | 286 HASH_LONG l; |
287 size_t n; | 287 size_t n; |
288 | 288 |
289 if (len==0) return 1; | 289 if (len==0) return 1; |
290 | 290 |
291 l=(c->Nl+(((HASH_LONG)len)<<3))&0xffffffffUL; | 291 l=(c->Nl+(((HASH_LONG)len)<<3))&0xffffffffUL; |
292 /* 95-05-24 eay Fixed a bug with the overflow handling, thanks to | 292 /* 95-05-24 eay Fixed a bug with the overflow handling, thanks to |
293 * Wei Dai <weidai@eskimo.com> for pointing it out. */ | 293 * Wei Dai <weidai@eskimo.com> for pointing it out. */ |
294 if (l < c->Nl) /* overflow */ | 294 if (l < c->Nl) /* overflow */ |
295 c->Nh++; | 295 c->Nh++; |
296 » c->Nh+=(len>>29);» /* might cause compiler warning on 16-bit */ | 296 » c->Nh+=(HASH_LONG)(len>>29);» /* might cause compiler warning on 16-bit */ |
297 c->Nl=l; | 297 c->Nl=l; |
298 | 298 |
299 n = c->num; | 299 n = c->num; |
300 if (n != 0) | 300 if (n != 0) |
301 { | 301 { |
302 p=(unsigned char *)c->data; | 302 p=(unsigned char *)c->data; |
303 | 303 |
304 if (len >= HASH_CBLOCK || len+n >= HASH_CBLOCK) | 304 if (len >= HASH_CBLOCK || len+n >= HASH_CBLOCK) |
305 { | 305 { |
306 memcpy (p+n,data,HASH_CBLOCK-n); | 306 memcpy (p+n,data,HASH_CBLOCK-n); |
(...skipping 17 matching lines...) Expand all Loading... |
324 { | 324 { |
325 HASH_BLOCK_DATA_ORDER (c,data,n); | 325 HASH_BLOCK_DATA_ORDER (c,data,n); |
326 n *= HASH_CBLOCK; | 326 n *= HASH_CBLOCK; |
327 data += n; | 327 data += n; |
328 len -= n; | 328 len -= n; |
329 } | 329 } |
330 | 330 |
331 if (len != 0) | 331 if (len != 0) |
332 { | 332 { |
333 p = (unsigned char *)c->data; | 333 p = (unsigned char *)c->data; |
334 » » c->num = len; | 334 » » c->num = (unsigned int)len; |
335 memcpy (p,data,len); | 335 memcpy (p,data,len); |
336 } | 336 } |
337 return 1; | 337 return 1; |
338 } | 338 } |
339 | 339 |
340 | 340 |
341 void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data) | 341 void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data) |
342 { | 342 { |
343 HASH_BLOCK_DATA_ORDER (c,data,1); | 343 HASH_BLOCK_DATA_ORDER (c,data,1); |
344 } | 344 } |
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
376 #ifndef HASH_MAKE_STRING | 376 #ifndef HASH_MAKE_STRING |
377 #error "HASH_MAKE_STRING must be defined!" | 377 #error "HASH_MAKE_STRING must be defined!" |
378 #else | 378 #else |
379 HASH_MAKE_STRING(c,md); | 379 HASH_MAKE_STRING(c,md); |
380 #endif | 380 #endif |
381 | 381 |
382 return 1; | 382 return 1; |
383 } | 383 } |
384 | 384 |
385 #ifndef MD32_REG_T | 385 #ifndef MD32_REG_T |
| 386 #if defined(__alpha) || defined(__sparcv9) || defined(__mips) |
386 #define MD32_REG_T long | 387 #define MD32_REG_T long |
387 /* | 388 /* |
388 * This comment was originally written for MD5, which is why it | 389 * This comment was originally written for MD5, which is why it |
389 * discusses A-D. But it basically applies to all 32-bit digests, | 390 * discusses A-D. But it basically applies to all 32-bit digests, |
390 * which is why it was moved to common header file. | 391 * which is why it was moved to common header file. |
391 * | 392 * |
392 * In case you wonder why A-D are declared as long and not | 393 * In case you wonder why A-D are declared as long and not |
393 * as MD5_LONG. Doing so results in slight performance | 394 * as MD5_LONG. Doing so results in slight performance |
394 * boost on LP64 architectures. The catch is we don't | 395 * boost on LP64 architectures. The catch is we don't |
395 * really care if 32 MSBs of a 64-bit register get polluted | 396 * really care if 32 MSBs of a 64-bit register get polluted |
396 * with eventual overflows as we *save* only 32 LSBs in | 397 * with eventual overflows as we *save* only 32 LSBs in |
397 * *either* case. Now declaring 'em long excuses the compiler | 398 * *either* case. Now declaring 'em long excuses the compiler |
398 * from keeping 32 MSBs zeroed resulting in 13% performance | 399 * from keeping 32 MSBs zeroed resulting in 13% performance |
399 * improvement under SPARC Solaris7/64 and 5% under AlphaLinux. | 400 * improvement under SPARC Solaris7/64 and 5% under AlphaLinux. |
400 * Well, to be honest it should say that this *prevents* | 401 * Well, to be honest it should say that this *prevents* |
401 * performance degradation. | 402 * performance degradation. |
402 * <appro@fy.chalmers.se> | 403 * <appro@fy.chalmers.se> |
403 * Apparently there're LP64 compilers that generate better | 404 */ |
404 * code if A-D are declared int. Most notably GCC-x86_64 | 405 #else |
405 * generates better code. | 406 /* |
| 407 * Above is not absolute and there are LP64 compilers that |
| 408 * generate better code if MD32_REG_T is defined int. The above |
| 409 * pre-processor condition reflects the circumstances under which |
| 410 * the conclusion was made and is subject to further extension. |
406 * <appro@fy.chalmers.se> | 411 * <appro@fy.chalmers.se> |
407 */ | 412 */ |
| 413 #define MD32_REG_T int |
408 #endif | 414 #endif |
| 415 #endif |
OLD | NEW |