| OLD | NEW |
| 1 /* SHA-256 and SHA-512 implementation based on code by Olivier Gay | 1 /* SHA-256 and SHA-512 implementation based on code by Olivier Gay |
| 2 * <olivier.gay@a3.epfl.ch> under a BSD-style license. See below. | 2 * <olivier.gay@a3.epfl.ch> under a BSD-style license. See below. |
| 3 */ | 3 */ |
| 4 | 4 |
| 5 /* | 5 /* |
| 6 * FIPS 180-2 SHA-224/256/384/512 implementation | 6 * FIPS 180-2 SHA-224/256/384/512 implementation |
| 7 * Last update: 02/02/2007 | 7 * Last update: 02/02/2007 |
| 8 * Issue date: 04/30/2005 | 8 * Issue date: 04/30/2005 |
| 9 * | 9 * |
| 10 * Copyright (C) 2005, 2007 Olivier Gay <olivier.gay@a3.epfl.ch> | 10 * Copyright (C) 2005, 2007 Olivier Gay <olivier.gay@a3.epfl.ch> |
| (...skipping 17 matching lines...) |
| 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE | 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE |
| 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 35 * SUCH DAMAGE. | 35 * SUCH DAMAGE. |
| 36 */ | 36 */ |
| 37 | 37 |
| 38 #include "sha.h" | 38 #include "cryptolib.h" |
| 39 #include <string.h> | 39 #include "utility.h" |
| 40 | 40 |
| 41 #define SHFR(x, n) (x >> n) | 41 #define SHFR(x, n) (x >> n) |
| 42 #define ROTR(x, n) ((x >> n) | (x << ((sizeof(x) << 3) - n))) | 42 #define ROTR(x, n) ((x >> n) | (x << ((sizeof(x) << 3) - n))) |
| 43 #define ROTL(x, n) ((x << n) | (x >> ((sizeof(x) << 3) - n))) | 43 #define ROTL(x, n) ((x << n) | (x >> ((sizeof(x) << 3) - n))) |
| 44 #define CH(x, y, z) ((x & y) ^ (~x & z)) | 44 #define CH(x, y, z) ((x & y) ^ (~x & z)) |
| 45 #define MAJ(x, y, z) ((x & y) ^ (x & z) ^ (y & z)) | 45 #define MAJ(x, y, z) ((x & y) ^ (x & z) ^ (y & z)) |
| 46 | 46 |
| 47 #define SHA256_F1(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22)) | 47 #define SHA256_F1(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22)) |
| 48 #define SHA256_F2(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25)) | 48 #define SHA256_F2(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25)) |
| 49 #define SHA256_F3(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHFR(x, 3)) | 49 #define SHA256_F3(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHFR(x, 3)) |
| (...skipping 283 matching lines...) |
| 333 | 333 |
| 334 | 334 |
| 335 void SHA256_update(SHA256_CTX* ctx, const uint8_t* data, uint64_t len) { | 335 void SHA256_update(SHA256_CTX* ctx, const uint8_t* data, uint64_t len) { |
| 336 unsigned int block_nb; | 336 unsigned int block_nb; |
| 337 unsigned int new_len, rem_len, tmp_len; | 337 unsigned int new_len, rem_len, tmp_len; |
| 338 const uint8_t *shifted_data; | 338 const uint8_t *shifted_data; |
| 339 | 339 |
| 340 tmp_len = SHA256_BLOCK_SIZE - ctx->len; | 340 tmp_len = SHA256_BLOCK_SIZE - ctx->len; |
| 341 rem_len = len < tmp_len ? len : tmp_len; | 341 rem_len = len < tmp_len ? len : tmp_len; |
| 342 | 342 |
| 343 memcpy(&ctx->block[ctx->len], data, rem_len); | 343 Memcpy(&ctx->block[ctx->len], data, rem_len); |
| 344 | 344 |
| 345 if (ctx->len + len < SHA256_BLOCK_SIZE) { | 345 if (ctx->len + len < SHA256_BLOCK_SIZE) { |
| 346 ctx->len += len; | 346 ctx->len += len; |
| 347 return; | 347 return; |
| 348 } | 348 } |
| 349 | 349 |
| 350 new_len = len - rem_len; | 350 new_len = len - rem_len; |
| 351 block_nb = new_len / SHA256_BLOCK_SIZE; | 351 block_nb = new_len / SHA256_BLOCK_SIZE; |
| 352 | 352 |
| 353 shifted_data = data + rem_len; | 353 shifted_data = data + rem_len; |
| 354 | 354 |
| 355 SHA256_transform(ctx, ctx->block, 1); | 355 SHA256_transform(ctx, ctx->block, 1); |
| 356 SHA256_transform(ctx, shifted_data, block_nb); | 356 SHA256_transform(ctx, shifted_data, block_nb); |
| 357 | 357 |
| 358 rem_len = new_len % SHA256_BLOCK_SIZE; | 358 rem_len = new_len % SHA256_BLOCK_SIZE; |
| 359 | 359 |
| 360 memcpy(ctx->block, &shifted_data[block_nb << 6], | 360 Memcpy(ctx->block, &shifted_data[block_nb << 6], |
| 361 rem_len); | 361 rem_len); |
| 362 | 362 |
| 363 ctx->len = rem_len; | 363 ctx->len = rem_len; |
| 364 ctx->tot_len += (block_nb + 1) << 6; | 364 ctx->tot_len += (block_nb + 1) << 6; |
| 365 } | 365 } |
| 366 | 366 |
| 367 uint8_t* SHA256_final(SHA256_CTX* ctx) { | 367 uint8_t* SHA256_final(SHA256_CTX* ctx) { |
| 368 unsigned int block_nb; | 368 unsigned int block_nb; |
| 369 unsigned int pm_len; | 369 unsigned int pm_len; |
| 370 unsigned int len_b; | 370 unsigned int len_b; |
| (...skipping 150 matching lines...) |
| 521 | 521 |
| 522 void SHA512_update(SHA512_CTX* ctx, const uint8_t* data, | 522 void SHA512_update(SHA512_CTX* ctx, const uint8_t* data, |
| 523 uint64_t len) { | 523 uint64_t len) { |
| 524 unsigned int block_nb; | 524 unsigned int block_nb; |
| 525 unsigned int new_len, rem_len, tmp_len; | 525 unsigned int new_len, rem_len, tmp_len; |
| 526 const uint8_t* shifted_data; | 526 const uint8_t* shifted_data; |
| 527 | 527 |
| 528 tmp_len = SHA512_BLOCK_SIZE - ctx->len; | 528 tmp_len = SHA512_BLOCK_SIZE - ctx->len; |
| 529 rem_len = len < tmp_len ? len : tmp_len; | 529 rem_len = len < tmp_len ? len : tmp_len; |
| 530 | 530 |
| 531 memcpy(&ctx->block[ctx->len], data, rem_len); | 531 Memcpy(&ctx->block[ctx->len], data, rem_len); |
| 532 | 532 |
| 533 if (ctx->len + len < SHA512_BLOCK_SIZE) { | 533 if (ctx->len + len < SHA512_BLOCK_SIZE) { |
| 534 ctx->len += len; | 534 ctx->len += len; |
| 535 return; | 535 return; |
| 536 } | 536 } |
| 537 | 537 |
| 538 new_len = len - rem_len; | 538 new_len = len - rem_len; |
| 539 block_nb = new_len / SHA512_BLOCK_SIZE; | 539 block_nb = new_len / SHA512_BLOCK_SIZE; |
| 540 | 540 |
| 541 shifted_data = data + rem_len; | 541 shifted_data = data + rem_len; |
| 542 | 542 |
| 543 SHA512_transform(ctx, ctx->block, 1); | 543 SHA512_transform(ctx, ctx->block, 1); |
| 544 SHA512_transform(ctx, shifted_data, block_nb); | 544 SHA512_transform(ctx, shifted_data, block_nb); |
| 545 | 545 |
| 546 rem_len = new_len % SHA512_BLOCK_SIZE; | 546 rem_len = new_len % SHA512_BLOCK_SIZE; |
| 547 | 547 |
| 548 memcpy(ctx->block, &shifted_data[block_nb << 7], | 548 Memcpy(ctx->block, &shifted_data[block_nb << 7], |
| 549 rem_len); | 549 rem_len); |
| 550 | 550 |
| 551 ctx->len = rem_len; | 551 ctx->len = rem_len; |
| 552 ctx->tot_len += (block_nb + 1) << 7; | 552 ctx->tot_len += (block_nb + 1) << 7; |
| 553 } | 553 } |
| 554 | 554 |
| 555 uint8_t* SHA512_final(SHA512_CTX* ctx) | 555 uint8_t* SHA512_final(SHA512_CTX* ctx) |
| 556 { | 556 { |
| 557 unsigned int block_nb; | 557 unsigned int block_nb; |
| 558 unsigned int pm_len; | 558 unsigned int pm_len; |
| (...skipping 55 matching lines...) |
| 614 int i; | 614 int i; |
| 615 SHA512_CTX ctx; | 615 SHA512_CTX ctx; |
| 616 SHA512_init(&ctx); | 616 SHA512_init(&ctx); |
| 617 SHA512_update(&ctx, data, len); | 617 SHA512_update(&ctx, data, len); |
| 618 p = SHA512_final(&ctx); | 618 p = SHA512_final(&ctx); |
| 619 for (i = 0; i < SHA512_DIGEST_SIZE; ++i) { | 619 for (i = 0; i < SHA512_DIGEST_SIZE; ++i) { |
| 620 digest[i] = *p++; | 620 digest[i] = *p++; |
| 621 } | 621 } |
| 622 return digest; | 622 return digest; |
| 623 } | 623 } |
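
A worked example of the partial-block bookkeeping in `SHA256_update()` (the SHA-512 variant is identical except for the block size): the `<< 6` and `<< 7` scalings in the hunks imply 64-byte and 128-byte blocks. The sketch below is not part of the change; it assumes `SHA256_BLOCK_SIZE` is 64 and traces one call that starts with 10 bytes already buffered in `ctx->block`.

```c
/* Worked example of the buffering arithmetic in SHA256_update(), assuming
 * SHA256_BLOCK_SIZE == 64 (implied by the `<< 6` scaling in the hunk). */
#include <assert.h>
#include <stdint.h>

int main(void) {
  unsigned int ctx_len = 10;   /* bytes already buffered in ctx->block */
  uint64_t len = 200;          /* bytes passed to this update call     */

  unsigned int tmp_len = 64 - ctx_len;                   /* 54: room left in the block */
  unsigned int rem_len = len < tmp_len ? len : tmp_len;  /* 54: bytes used to fill it  */
  unsigned int new_len = len - rem_len;                  /* 146 bytes remain           */
  unsigned int block_nb = new_len / 64;                  /* 2 full blocks to transform */
  unsigned int leftover = new_len % 64;                  /* 18 bytes re-buffered       */

  assert(tmp_len == 54 && rem_len == 54);
  assert(block_nb == 2 && leftover == 18);
  /* ctx->tot_len grows by (block_nb + 1) << 6 = 192: the refilled block
   * plus the two full blocks hashed directly from the caller's data. */
  return 0;
}
```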
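For reference, a minimal usage sketch of the streaming interface these hunks exercise. `SHA256_init()` and `SHA256_DIGEST_SIZE` are not visible in this diff and are assumed here by symmetry with the SHA-512 wrapper at the end of the file; the header name follows the NEW column.

```c
/* Hedged usage sketch: assumes SHA256_init() and SHA256_DIGEST_SIZE exist
 * alongside the SHA256_update()/SHA256_final() calls visible in the hunk,
 * mirroring the SHA-512 one-shot wrapper near the end of the file. */
#include <stdint.h>
#include "cryptolib.h"

static void hash_in_chunks(const uint8_t* msg, uint64_t len,
                           uint8_t digest[SHA256_DIGEST_SIZE]) {
  SHA256_CTX ctx;
  const uint8_t* p;
  int i;

  SHA256_init(&ctx);
  /* Feed the message in two pieces to exercise the partial-block
   * buffering path in SHA256_update(). */
  SHA256_update(&ctx, msg, len / 2);
  SHA256_update(&ctx, msg + len / 2, len - len / 2);

  p = SHA256_final(&ctx);  /* returns a pointer into the context */
  for (i = 0; i < SHA256_DIGEST_SIZE; ++i)
    digest[i] = p[i];
}
```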