/* SHA-256 and SHA-512 implementation based on code by Olivier Gay
 * <olivier.gay@a3.epfl.ch> under a BSD-style license. See below.
 */

/*
 * FIPS 180-2 SHA-224/256/384/512 implementation
 * Last update: 02/02/2007
 * Issue date: 04/30/2005
 *
 * Copyright (C) 2005, 2007 Olivier Gay <olivier.gay@a3.epfl.ch>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "sha.h"
#include <string.h>

#define SHFR(x, n) ((x) >> (n))
#define ROTR(x, n) (((x) >> (n)) | ((x) << ((sizeof(x) << 3) - (n))))
#define ROTL(x, n) (((x) << (n)) | ((x) >> ((sizeof(x) << 3) - (n))))
#define CH(x, y, z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
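
/*
 * CH is the "choose" function (each bit of x selects between the
 * corresponding bits of y and z) and MAJ is the bitwise majority of
 * x, y and z, both as defined by the FIPS 180-2 round functions.
 */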

#define SHA256_F1(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
#define SHA256_F2(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25))
#define SHA256_F3(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHFR(x, 3))
#define SHA256_F4(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SHFR(x, 10))

#define SHA512_F1(x) (ROTR(x, 28) ^ ROTR(x, 34) ^ ROTR(x, 39))
#define SHA512_F2(x) (ROTR(x, 14) ^ ROTR(x, 18) ^ ROTR(x, 41))
#define SHA512_F3(x) (ROTR(x, 1) ^ ROTR(x, 8) ^ SHFR(x, 7))
#define SHA512_F4(x) (ROTR(x, 19) ^ ROTR(x, 61) ^ SHFR(x, 6))
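
/*
 * F1/F2 are the "big sigma" functions applied in every round; F3/F4
 * are the "small sigma" functions used to expand the message schedule
 * (FIPS 180-2, section 4.1). The SHA-512 variants differ only in
 * their rotation and shift amounts.
 */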

#define UNPACK32(x, str)                        \
  {                                             \
    *((str) + 3) = (uint8_t) ((x)      );       \
    *((str) + 2) = (uint8_t) ((x) >>  8);       \
    *((str) + 1) = (uint8_t) ((x) >> 16);       \
    *((str) + 0) = (uint8_t) ((x) >> 24);       \
  }

#define PACK32(str, x)                          \
  {                                             \
    *(x) = ((uint32_t) *((str) + 3)      )      \
         | ((uint32_t) *((str) + 2) <<  8)      \
         | ((uint32_t) *((str) + 1) << 16)      \
         | ((uint32_t) *((str) + 0) << 24);     \
  }

#define UNPACK64(x, str)                        \
  {                                             \
    *((str) + 7) = (uint8_t) ((x)      );       \
    *((str) + 6) = (uint8_t) ((x) >>  8);       \
    *((str) + 5) = (uint8_t) ((x) >> 16);       \
    *((str) + 4) = (uint8_t) ((x) >> 24);       \
    *((str) + 3) = (uint8_t) ((x) >> 32);       \
    *((str) + 2) = (uint8_t) ((x) >> 40);       \
    *((str) + 1) = (uint8_t) ((x) >> 48);       \
    *((str) + 0) = (uint8_t) ((x) >> 56);       \
  }

#define PACK64(str, x)                          \
  {                                             \
    *(x) = ((uint64_t) *((str) + 7)      )      \
         | ((uint64_t) *((str) + 6) <<  8)      \
         | ((uint64_t) *((str) + 5) << 16)      \
         | ((uint64_t) *((str) + 4) << 24)      \
         | ((uint64_t) *((str) + 3) << 32)      \
         | ((uint64_t) *((str) + 2) << 40)      \
         | ((uint64_t) *((str) + 1) << 48)      \
         | ((uint64_t) *((str) + 0) << 56);     \
  }
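
/*
 * FIPS 180-2 defines the message and digest as big-endian byte
 * sequences, so PACK32/PACK64 load words big-endian and
 * UNPACK32/UNPACK64 store them big-endian regardless of host byte
 * order; e.g. PACK32 over the bytes {0x01, 0x02, 0x03, 0x04}
 * produces the word 0x01020304.
 */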

/* Macros used for loop unrolling. SHA256_SCR/SHA512_SCR expand one
 * word of the message schedule from four earlier words. */

#define SHA256_SCR(i)                           \
  {                                             \
    w[i] = SHA256_F4(w[i - 2]) + w[i - 7]       \
         + SHA256_F3(w[i - 15]) + w[i - 16];    \
  }

#define SHA512_SCR(i)                           \
  {                                             \
    w[i] = SHA512_F4(w[i - 2]) + w[i - 7]       \
         + SHA512_F3(w[i - 15]) + w[i - 16];    \
  }

#define SHA256_EXP(a, b, c, d, e, f, g, h, j)               \
  {                                                         \
    t1 = wv[h] + SHA256_F2(wv[e]) + CH(wv[e], wv[f], wv[g]) \
         + sha256_k[j] + w[j];                              \
    t2 = SHA256_F1(wv[a]) + MAJ(wv[a], wv[b], wv[c]);       \
    wv[d] += t1;                                            \
    wv[h] = t1 + t2;                                        \
  }

#define SHA512_EXP(a, b, c, d, e, f, g, h, j)               \
  {                                                         \
    t1 = wv[h] + SHA512_F2(wv[e]) + CH(wv[e], wv[f], wv[g]) \
         + sha512_k[j] + w[j];                              \
    t2 = SHA512_F1(wv[a]) + MAJ(wv[a], wv[b], wv[c]);       \
    wv[d] += t1;                                            \
    wv[h] = t1 + t2;                                        \
  }
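
/*
 * SHA256_EXP/SHA512_EXP compute one compression round. Rather than
 * shifting the eight working variables a..h down one slot per round,
 * the unrolled code rotates the index arguments on each call, so only
 * wv[d] and wv[h] are actually written.
 */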

uint32_t sha256_h0[8] = {
  0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
  0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19};

uint64_t sha512_h0[8] = {
  0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL,
  0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL,
  0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL,
  0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL};
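
/*
 * sha256_h0/sha512_h0 are the standard initial hash values: the first
 * 32 (resp. 64) bits of the fractional parts of the square roots of
 * the first eight primes (FIPS 180-2, section 5.3).
 */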

uint32_t sha256_k[64] = {
  0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
  0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
  0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
  0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
  0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
  0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
  0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
  0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
  0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
  0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
  0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
  0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
  0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
  0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
  0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
  0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2};

uint64_t sha512_k[80] = {
  0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL,
  0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
  0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
  0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
  0xd807aa98a3030242ULL, 0x12835b0145706fbeULL,
  0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
  0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL,
  0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
  0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
  0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
  0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL,
  0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
  0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL,
  0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
  0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
  0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
  0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL,
  0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
  0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL,
  0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
  0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
  0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
  0xd192e819d6ef5218ULL, 0xd69906245565a910ULL,
  0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
  0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL,
  0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
  0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
  0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
  0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL,
  0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
  0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL,
  0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
  0xca273eceea26619cULL, 0xd186b8c721c0c207ULL,
  0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
  0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL,
  0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
  0x28db77f523047d84ULL, 0x32caab7b40c72493ULL,
  0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
  0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
  0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL};
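
/*
 * Round constants: the first 32 (resp. 64) bits of the fractional
 * parts of the cube roots of the first 64 (resp. 80) primes
 * (FIPS 180-2, section 4.2).
 */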


/* SHA-256 implementation */
void SHA256_init(SHA256_CTX *ctx) {
#ifndef UNROLL_LOOPS
  int i;
  for (i = 0; i < 8; i++) {
    ctx->h[i] = sha256_h0[i];
  }
#else
  ctx->h[0] = sha256_h0[0]; ctx->h[1] = sha256_h0[1];
  ctx->h[2] = sha256_h0[2]; ctx->h[3] = sha256_h0[3];
  ctx->h[4] = sha256_h0[4]; ctx->h[5] = sha256_h0[5];
  ctx->h[6] = sha256_h0[6]; ctx->h[7] = sha256_h0[7];
#endif /* !UNROLL_LOOPS */

  ctx->len = 0;
  ctx->tot_len = 0;
}

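/*
 * Core compression function: processes block_nb consecutive 64-byte
 * blocks. For each block it expands the 16 message words to 64
 * (SHA256_SCR), runs 64 rounds over the working variables wv[0..7],
 * and finally adds the result back into the chaining state ctx->h
 * (the Davies-Meyer feed-forward).
 */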
static void SHA256_transform(SHA256_CTX* ctx, const uint8_t* message,
                             unsigned int block_nb) {
  uint32_t w[64];
  uint32_t wv[8];
  uint32_t t1, t2;
  const uint8_t *sub_block;
  int i;

#ifndef UNROLL_LOOPS
  int j;
#endif

  for (i = 0; i < (int) block_nb; i++) {
    sub_block = message + (i << 6);

#ifndef UNROLL_LOOPS
    for (j = 0; j < 16; j++) {
      PACK32(&sub_block[j << 2], &w[j]);
    }

    for (j = 16; j < 64; j++) {
      SHA256_SCR(j);
    }

    for (j = 0; j < 8; j++) {
      wv[j] = ctx->h[j];
    }

    for (j = 0; j < 64; j++) {
      t1 = wv[7] + SHA256_F2(wv[4]) + CH(wv[4], wv[5], wv[6])
           + sha256_k[j] + w[j];
      t2 = SHA256_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]);
      wv[7] = wv[6];
      wv[6] = wv[5];
      wv[5] = wv[4];
      wv[4] = wv[3] + t1;
      wv[3] = wv[2];
      wv[2] = wv[1];
      wv[1] = wv[0];
      wv[0] = t1 + t2;
    }

    for (j = 0; j < 8; j++) {
      ctx->h[j] += wv[j];
    }
#else
    PACK32(&sub_block[ 0], &w[ 0]); PACK32(&sub_block[ 4], &w[ 1]);
    PACK32(&sub_block[ 8], &w[ 2]); PACK32(&sub_block[12], &w[ 3]);
    PACK32(&sub_block[16], &w[ 4]); PACK32(&sub_block[20], &w[ 5]);
    PACK32(&sub_block[24], &w[ 6]); PACK32(&sub_block[28], &w[ 7]);
    PACK32(&sub_block[32], &w[ 8]); PACK32(&sub_block[36], &w[ 9]);
    PACK32(&sub_block[40], &w[10]); PACK32(&sub_block[44], &w[11]);
    PACK32(&sub_block[48], &w[12]); PACK32(&sub_block[52], &w[13]);
    PACK32(&sub_block[56], &w[14]); PACK32(&sub_block[60], &w[15]);

    SHA256_SCR(16); SHA256_SCR(17); SHA256_SCR(18); SHA256_SCR(19);
    SHA256_SCR(20); SHA256_SCR(21); SHA256_SCR(22); SHA256_SCR(23);
    SHA256_SCR(24); SHA256_SCR(25); SHA256_SCR(26); SHA256_SCR(27);
    SHA256_SCR(28); SHA256_SCR(29); SHA256_SCR(30); SHA256_SCR(31);
    SHA256_SCR(32); SHA256_SCR(33); SHA256_SCR(34); SHA256_SCR(35);
    SHA256_SCR(36); SHA256_SCR(37); SHA256_SCR(38); SHA256_SCR(39);
    SHA256_SCR(40); SHA256_SCR(41); SHA256_SCR(42); SHA256_SCR(43);
    SHA256_SCR(44); SHA256_SCR(45); SHA256_SCR(46); SHA256_SCR(47);
    SHA256_SCR(48); SHA256_SCR(49); SHA256_SCR(50); SHA256_SCR(51);
    SHA256_SCR(52); SHA256_SCR(53); SHA256_SCR(54); SHA256_SCR(55);
    SHA256_SCR(56); SHA256_SCR(57); SHA256_SCR(58); SHA256_SCR(59);
    SHA256_SCR(60); SHA256_SCR(61); SHA256_SCR(62); SHA256_SCR(63);

    wv[0] = ctx->h[0]; wv[1] = ctx->h[1];
    wv[2] = ctx->h[2]; wv[3] = ctx->h[3];
    wv[4] = ctx->h[4]; wv[5] = ctx->h[5];
    wv[6] = ctx->h[6]; wv[7] = ctx->h[7];

    SHA256_EXP(0,1,2,3,4,5,6,7, 0); SHA256_EXP(7,0,1,2,3,4,5,6, 1);
    SHA256_EXP(6,7,0,1,2,3,4,5, 2); SHA256_EXP(5,6,7,0,1,2,3,4, 3);
    SHA256_EXP(4,5,6,7,0,1,2,3, 4); SHA256_EXP(3,4,5,6,7,0,1,2, 5);
    SHA256_EXP(2,3,4,5,6,7,0,1, 6); SHA256_EXP(1,2,3,4,5,6,7,0, 7);
    SHA256_EXP(0,1,2,3,4,5,6,7, 8); SHA256_EXP(7,0,1,2,3,4,5,6, 9);
    SHA256_EXP(6,7,0,1,2,3,4,5,10); SHA256_EXP(5,6,7,0,1,2,3,4,11);
    SHA256_EXP(4,5,6,7,0,1,2,3,12); SHA256_EXP(3,4,5,6,7,0,1,2,13);
    SHA256_EXP(2,3,4,5,6,7,0,1,14); SHA256_EXP(1,2,3,4,5,6,7,0,15);
    SHA256_EXP(0,1,2,3,4,5,6,7,16); SHA256_EXP(7,0,1,2,3,4,5,6,17);
    SHA256_EXP(6,7,0,1,2,3,4,5,18); SHA256_EXP(5,6,7,0,1,2,3,4,19);
    SHA256_EXP(4,5,6,7,0,1,2,3,20); SHA256_EXP(3,4,5,6,7,0,1,2,21);
    SHA256_EXP(2,3,4,5,6,7,0,1,22); SHA256_EXP(1,2,3,4,5,6,7,0,23);
    SHA256_EXP(0,1,2,3,4,5,6,7,24); SHA256_EXP(7,0,1,2,3,4,5,6,25);
    SHA256_EXP(6,7,0,1,2,3,4,5,26); SHA256_EXP(5,6,7,0,1,2,3,4,27);
    SHA256_EXP(4,5,6,7,0,1,2,3,28); SHA256_EXP(3,4,5,6,7,0,1,2,29);
    SHA256_EXP(2,3,4,5,6,7,0,1,30); SHA256_EXP(1,2,3,4,5,6,7,0,31);
    SHA256_EXP(0,1,2,3,4,5,6,7,32); SHA256_EXP(7,0,1,2,3,4,5,6,33);
    SHA256_EXP(6,7,0,1,2,3,4,5,34); SHA256_EXP(5,6,7,0,1,2,3,4,35);
    SHA256_EXP(4,5,6,7,0,1,2,3,36); SHA256_EXP(3,4,5,6,7,0,1,2,37);
    SHA256_EXP(2,3,4,5,6,7,0,1,38); SHA256_EXP(1,2,3,4,5,6,7,0,39);
    SHA256_EXP(0,1,2,3,4,5,6,7,40); SHA256_EXP(7,0,1,2,3,4,5,6,41);
    SHA256_EXP(6,7,0,1,2,3,4,5,42); SHA256_EXP(5,6,7,0,1,2,3,4,43);
    SHA256_EXP(4,5,6,7,0,1,2,3,44); SHA256_EXP(3,4,5,6,7,0,1,2,45);
    SHA256_EXP(2,3,4,5,6,7,0,1,46); SHA256_EXP(1,2,3,4,5,6,7,0,47);
    SHA256_EXP(0,1,2,3,4,5,6,7,48); SHA256_EXP(7,0,1,2,3,4,5,6,49);
    SHA256_EXP(6,7,0,1,2,3,4,5,50); SHA256_EXP(5,6,7,0,1,2,3,4,51);
    SHA256_EXP(4,5,6,7,0,1,2,3,52); SHA256_EXP(3,4,5,6,7,0,1,2,53);
    SHA256_EXP(2,3,4,5,6,7,0,1,54); SHA256_EXP(1,2,3,4,5,6,7,0,55);
    SHA256_EXP(0,1,2,3,4,5,6,7,56); SHA256_EXP(7,0,1,2,3,4,5,6,57);
    SHA256_EXP(6,7,0,1,2,3,4,5,58); SHA256_EXP(5,6,7,0,1,2,3,4,59);
    SHA256_EXP(4,5,6,7,0,1,2,3,60); SHA256_EXP(3,4,5,6,7,0,1,2,61);
    SHA256_EXP(2,3,4,5,6,7,0,1,62); SHA256_EXP(1,2,3,4,5,6,7,0,63);

    ctx->h[0] += wv[0]; ctx->h[1] += wv[1];
    ctx->h[2] += wv[2]; ctx->h[3] += wv[3];
    ctx->h[4] += wv[4]; ctx->h[5] += wv[5];
    ctx->h[6] += wv[6]; ctx->h[7] += wv[7];
#endif /* !UNROLL_LOOPS */
  }
}

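/*
 * Streaming interface: bytes accumulate in ctx->block until a full
 * 64-byte block is available. Each call first tops up any partial
 * block, hashes it together with all whole blocks in the new data,
 * then stashes the remaining tail for the next call. ctx->tot_len
 * counts bytes already run through the transform; ctx->len counts
 * the buffered tail.
 */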
void SHA256_update(SHA256_CTX* ctx, const uint8_t* data, int len) {
  unsigned int block_nb;
  unsigned int new_len, rem_len, tmp_len;
  const uint8_t *shifted_data;

  tmp_len = SHA256_BLOCK_SIZE - ctx->len;
  rem_len = len < tmp_len ? len : tmp_len;

  memcpy(&ctx->block[ctx->len], data, rem_len);

  if (ctx->len + len < SHA256_BLOCK_SIZE) {
    ctx->len += len;
    return;
  }

  new_len = len - rem_len;
  block_nb = new_len / SHA256_BLOCK_SIZE;

  shifted_data = data + rem_len;

  SHA256_transform(ctx, ctx->block, 1);
  SHA256_transform(ctx, shifted_data, block_nb);

  rem_len = new_len % SHA256_BLOCK_SIZE;

  memcpy(ctx->block, &shifted_data[block_nb << 6], rem_len);

  ctx->len = rem_len;
  ctx->tot_len += (block_nb + 1) << 6;
}

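/*
 * Finalization pads per FIPS 180-2: a 0x80 marker byte, zero fill,
 * then the message length in bits as a big-endian 64-bit field.
 * block_nb is 2 when fewer than 9 spare bytes remain in the current
 * block. Since len_b is a 32-bit value, only the low word of the
 * length field is ever non-zero; messages are assumed to be shorter
 * than 2^29 bytes.
 */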
uint8_t* SHA256_final(SHA256_CTX* ctx) {
  unsigned int block_nb;
  unsigned int pm_len;
  unsigned int len_b;
#ifndef UNROLL_LOOPS
  int i;
#endif

  block_nb = (1 + ((SHA256_BLOCK_SIZE - 9)
                   < (ctx->len % SHA256_BLOCK_SIZE)));

  len_b = (ctx->tot_len + ctx->len) << 3;
  pm_len = block_nb << 6;

  memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
  ctx->block[ctx->len] = 0x80;
  UNPACK32(len_b, ctx->block + pm_len - 4);

  SHA256_transform(ctx, ctx->block, block_nb);

#ifndef UNROLL_LOOPS
  for (i = 0; i < 8; i++) {
    UNPACK32(ctx->h[i], &ctx->buf[i << 2]);
  }
#else
  UNPACK32(ctx->h[0], &ctx->buf[ 0]);
  UNPACK32(ctx->h[1], &ctx->buf[ 4]);
  UNPACK32(ctx->h[2], &ctx->buf[ 8]);
  UNPACK32(ctx->h[3], &ctx->buf[12]);
  UNPACK32(ctx->h[4], &ctx->buf[16]);
  UNPACK32(ctx->h[5], &ctx->buf[20]);
  UNPACK32(ctx->h[6], &ctx->buf[24]);
  UNPACK32(ctx->h[7], &ctx->buf[28]);
#endif /* !UNROLL_LOOPS */

  return ctx->buf;
}


/* SHA-512 implementation */

void SHA512_init(SHA512_CTX *ctx) {
#ifndef UNROLL_LOOPS
  int i;
  for (i = 0; i < 8; i++) {
    ctx->h[i] = sha512_h0[i];
  }
#else
  ctx->h[0] = sha512_h0[0]; ctx->h[1] = sha512_h0[1];
  ctx->h[2] = sha512_h0[2]; ctx->h[3] = sha512_h0[3];
  ctx->h[4] = sha512_h0[4]; ctx->h[5] = sha512_h0[5];
  ctx->h[6] = sha512_h0[6]; ctx->h[7] = sha512_h0[7];
#endif /* !UNROLL_LOOPS */

  ctx->len = 0;
  ctx->tot_len = 0;
}

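/*
 * SHA-512 compression: same structure as SHA256_transform, but with
 * 64-bit words, 128-byte blocks, an 80-entry message schedule and 80
 * rounds. The unrolled variant drives SHA512_EXP from a do/while loop
 * of eight calls instead of writing out all 80 rounds.
 */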
static void SHA512_transform(SHA512_CTX* ctx, const uint8_t* message,
                             unsigned int block_nb) {
  uint64_t w[80];
  uint64_t wv[8];
  uint64_t t1, t2;
  const uint8_t *sub_block;
  int i, j;

  for (i = 0; i < (int) block_nb; i++) {
    sub_block = message + (i << 7);

#ifndef UNROLL_LOOPS
    for (j = 0; j < 16; j++) {
      PACK64(&sub_block[j << 3], &w[j]);
    }

    for (j = 16; j < 80; j++) {
      SHA512_SCR(j);
    }

    for (j = 0; j < 8; j++) {
      wv[j] = ctx->h[j];
    }

    for (j = 0; j < 80; j++) {
      t1 = wv[7] + SHA512_F2(wv[4]) + CH(wv[4], wv[5], wv[6])
           + sha512_k[j] + w[j];
      t2 = SHA512_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]);
      wv[7] = wv[6];
      wv[6] = wv[5];
      wv[5] = wv[4];
      wv[4] = wv[3] + t1;
      wv[3] = wv[2];
      wv[2] = wv[1];
      wv[1] = wv[0];
      wv[0] = t1 + t2;
    }

    for (j = 0; j < 8; j++) {
      ctx->h[j] += wv[j];
    }
#else
    PACK64(&sub_block[  0], &w[ 0]); PACK64(&sub_block[  8], &w[ 1]);
    PACK64(&sub_block[ 16], &w[ 2]); PACK64(&sub_block[ 24], &w[ 3]);
    PACK64(&sub_block[ 32], &w[ 4]); PACK64(&sub_block[ 40], &w[ 5]);
    PACK64(&sub_block[ 48], &w[ 6]); PACK64(&sub_block[ 56], &w[ 7]);
    PACK64(&sub_block[ 64], &w[ 8]); PACK64(&sub_block[ 72], &w[ 9]);
    PACK64(&sub_block[ 80], &w[10]); PACK64(&sub_block[ 88], &w[11]);
    PACK64(&sub_block[ 96], &w[12]); PACK64(&sub_block[104], &w[13]);
    PACK64(&sub_block[112], &w[14]); PACK64(&sub_block[120], &w[15]);

    SHA512_SCR(16); SHA512_SCR(17); SHA512_SCR(18); SHA512_SCR(19);
    SHA512_SCR(20); SHA512_SCR(21); SHA512_SCR(22); SHA512_SCR(23);
    SHA512_SCR(24); SHA512_SCR(25); SHA512_SCR(26); SHA512_SCR(27);
    SHA512_SCR(28); SHA512_SCR(29); SHA512_SCR(30); SHA512_SCR(31);
    SHA512_SCR(32); SHA512_SCR(33); SHA512_SCR(34); SHA512_SCR(35);
    SHA512_SCR(36); SHA512_SCR(37); SHA512_SCR(38); SHA512_SCR(39);
    SHA512_SCR(40); SHA512_SCR(41); SHA512_SCR(42); SHA512_SCR(43);
    SHA512_SCR(44); SHA512_SCR(45); SHA512_SCR(46); SHA512_SCR(47);
    SHA512_SCR(48); SHA512_SCR(49); SHA512_SCR(50); SHA512_SCR(51);
    SHA512_SCR(52); SHA512_SCR(53); SHA512_SCR(54); SHA512_SCR(55);
    SHA512_SCR(56); SHA512_SCR(57); SHA512_SCR(58); SHA512_SCR(59);
    SHA512_SCR(60); SHA512_SCR(61); SHA512_SCR(62); SHA512_SCR(63);
    SHA512_SCR(64); SHA512_SCR(65); SHA512_SCR(66); SHA512_SCR(67);
    SHA512_SCR(68); SHA512_SCR(69); SHA512_SCR(70); SHA512_SCR(71);
    SHA512_SCR(72); SHA512_SCR(73); SHA512_SCR(74); SHA512_SCR(75);
    SHA512_SCR(76); SHA512_SCR(77); SHA512_SCR(78); SHA512_SCR(79);

    wv[0] = ctx->h[0]; wv[1] = ctx->h[1];
    wv[2] = ctx->h[2]; wv[3] = ctx->h[3];
    wv[4] = ctx->h[4]; wv[5] = ctx->h[5];
    wv[6] = ctx->h[6]; wv[7] = ctx->h[7];

    j = 0;

    do {
      SHA512_EXP(0,1,2,3,4,5,6,7,j); j++;
      SHA512_EXP(7,0,1,2,3,4,5,6,j); j++;
      SHA512_EXP(6,7,0,1,2,3,4,5,j); j++;
      SHA512_EXP(5,6,7,0,1,2,3,4,j); j++;
      SHA512_EXP(4,5,6,7,0,1,2,3,j); j++;
      SHA512_EXP(3,4,5,6,7,0,1,2,j); j++;
      SHA512_EXP(2,3,4,5,6,7,0,1,j); j++;
      SHA512_EXP(1,2,3,4,5,6,7,0,j); j++;
    } while (j < 80);

    ctx->h[0] += wv[0]; ctx->h[1] += wv[1];
    ctx->h[2] += wv[2]; ctx->h[3] += wv[3];
    ctx->h[4] += wv[4]; ctx->h[5] += wv[5];
    ctx->h[6] += wv[6]; ctx->h[7] += wv[7];
#endif /* !UNROLL_LOOPS */
  }
}

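/*
 * Same buffering scheme as SHA256_update, with the 128-byte SHA-512
 * block size (hence the << 7 shifts in place of << 6).
 */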
void SHA512_update(SHA512_CTX* ctx, const uint8_t* data, int len) {
  unsigned int block_nb;
  unsigned int new_len, rem_len, tmp_len;
  const uint8_t* shifted_data;

  tmp_len = SHA512_BLOCK_SIZE - ctx->len;
  rem_len = len < tmp_len ? len : tmp_len;

  memcpy(&ctx->block[ctx->len], data, rem_len);

  if (ctx->len + len < SHA512_BLOCK_SIZE) {
    ctx->len += len;
    return;
  }

  new_len = len - rem_len;
  block_nb = new_len / SHA512_BLOCK_SIZE;

  shifted_data = data + rem_len;

  SHA512_transform(ctx, ctx->block, 1);
  SHA512_transform(ctx, shifted_data, block_nb);

  rem_len = new_len % SHA512_BLOCK_SIZE;

  memcpy(ctx->block, &shifted_data[block_nb << 7], rem_len);

  ctx->len = rem_len;
  ctx->tot_len += (block_nb + 1) << 7;
}

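/*
 * SHA-512 padding nominally ends with a 128-bit length field, but
 * len_b is a 32-bit value and only the low 4 bytes of the field are
 * written (the rest stay zero from the memset), so this code likewise
 * assumes messages shorter than 2^29 bytes.
 */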
uint8_t* SHA512_final(SHA512_CTX* ctx) {
  unsigned int block_nb;
  unsigned int pm_len;
  unsigned int len_b;

#ifndef UNROLL_LOOPS
  int i;
#endif

  block_nb = 1 + ((SHA512_BLOCK_SIZE - 17)
                  < (ctx->len % SHA512_BLOCK_SIZE));

  len_b = (ctx->tot_len + ctx->len) << 3;
  pm_len = block_nb << 7;

  memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
  ctx->block[ctx->len] = 0x80;
  UNPACK32(len_b, ctx->block + pm_len - 4);

  SHA512_transform(ctx, ctx->block, block_nb);

#ifndef UNROLL_LOOPS
  for (i = 0; i < 8; i++) {
    UNPACK64(ctx->h[i], &ctx->buf[i << 3]);
  }
#else
  UNPACK64(ctx->h[0], &ctx->buf[ 0]);
  UNPACK64(ctx->h[1], &ctx->buf[ 8]);
  UNPACK64(ctx->h[2], &ctx->buf[16]);
  UNPACK64(ctx->h[3], &ctx->buf[24]);
  UNPACK64(ctx->h[4], &ctx->buf[32]);
  UNPACK64(ctx->h[5], &ctx->buf[40]);
  UNPACK64(ctx->h[6], &ctx->buf[48]);
  UNPACK64(ctx->h[7], &ctx->buf[56]);
#endif /* !UNROLL_LOOPS */

  return ctx->buf;
}


/* Convenience functions. */
uint8_t* SHA256(const uint8_t* data, int len, uint8_t* digest) {
  const uint8_t* p;
  int i;
  SHA256_CTX ctx;
  SHA256_init(&ctx);
  SHA256_update(&ctx, data, len);
  p = SHA256_final(&ctx);
  for (i = 0; i < SHA256_DIGEST_SIZE; ++i) {
    digest[i] = *p++;
  }
  return digest;
}

uint8_t* SHA512(const uint8_t* data, int len, uint8_t* digest) {
  const uint8_t* p;
  int i;
  SHA512_CTX ctx;
  SHA512_init(&ctx);
  SHA512_update(&ctx, data, len);
  p = SHA512_final(&ctx);
  for (i = 0; i < SHA512_DIGEST_SIZE; ++i) {
    digest[i] = *p++;
  }
  return digest;
}
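
/*
 * Usage sketch for the one-shot wrappers above, checked against the
 * FIPS 180-2 "abc" test vector for SHA-256. SHA_SELFTEST is a
 * hypothetical guard macro, not defined anywhere in this project;
 * compile this file with -DSHA_SELFTEST to build the example as a
 * standalone program.
 */
#ifdef SHA_SELFTEST
#include <stdio.h>

int main(void) {
  /* Expected digest of "abc" from FIPS 180-2, appendix B.1. */
  static const uint8_t kExpected[SHA256_DIGEST_SIZE] = {
    0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea,
    0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23,
    0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c,
    0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad};
  uint8_t digest[SHA256_DIGEST_SIZE];
  int i;

  SHA256((const uint8_t*) "abc", 3, digest);
  for (i = 0; i < SHA256_DIGEST_SIZE; i++) {
    printf("%02x", digest[i]);
  }
  printf("\n");
  /* Exit status 0 iff the computed digest matches the test vector. */
  return memcmp(digest, kExpected, SHA256_DIGEST_SIZE) != 0;
}
#endif /* SHA_SELFTEST */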