/*-
 * Copyright 2009 Colin Percival
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This file was originally written by Colin Percival as part of the Tarsnap
 * online backup system.
 */
#include "scrypt_platform.h"

#include <sys/types.h>
#include <sys/mman.h>

#include <emmintrin.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "sha256.h"
#include "sysendian.h"

#include "crypto_scrypt.h"

static void blkcpy(void *, void *, size_t);
static void blkxor(void *, void *, size_t);
static void salsa20_8(__m128i *);
static void blockmix_salsa8(__m128i *, __m128i *, __m128i *, size_t);
static uint64_t integerify(void *, size_t);
static void smix(uint8_t *, size_t, uint64_t, void *, void *);

static void
blkcpy(void * dest, void * src, size_t len)
{
        __m128i * D = dest;
        __m128i * S = src;
        size_t L = len / 16;
        size_t i;

        for (i = 0; i < L; i++)
                D[i] = S[i];
}

static void
blkxor(void * dest, void * src, size_t len)
{
        __m128i * D = dest;
        __m128i * S = src;
        size_t L = len / 16;
        size_t i;

        for (i = 0; i < L; i++)
                D[i] = _mm_xor_si128(D[i], S[i]);
}

/**
 * salsa20_8(B):
 * Apply the salsa20/8 core to the provided block.
 */
static void
salsa20_8(__m128i B[4])
{
        __m128i X0, X1, X2, X3;
        __m128i T;
        size_t i;

        X0 = B[0];
        X1 = B[1];
        X2 = B[2];
        X3 = B[3];

        for (i = 0; i < 8; i += 2) {
                /* Operate on "columns". */
                T = _mm_add_epi32(X0, X3);
                X1 = _mm_xor_si128(X1, _mm_slli_epi32(T, 7));
                X1 = _mm_xor_si128(X1, _mm_srli_epi32(T, 25));
                T = _mm_add_epi32(X1, X0);
                X2 = _mm_xor_si128(X2, _mm_slli_epi32(T, 9));
                X2 = _mm_xor_si128(X2, _mm_srli_epi32(T, 23));
                T = _mm_add_epi32(X2, X1);
                X3 = _mm_xor_si128(X3, _mm_slli_epi32(T, 13));
                X3 = _mm_xor_si128(X3, _mm_srli_epi32(T, 19));
                T = _mm_add_epi32(X3, X2);
                X0 = _mm_xor_si128(X0, _mm_slli_epi32(T, 18));
                X0 = _mm_xor_si128(X0, _mm_srli_epi32(T, 14));

                /* Rearrange data. */
                X1 = _mm_shuffle_epi32(X1, 0x93);
                X2 = _mm_shuffle_epi32(X2, 0x4E);
                X3 = _mm_shuffle_epi32(X3, 0x39);

                /* Operate on "rows". */
                T = _mm_add_epi32(X0, X1);
                X3 = _mm_xor_si128(X3, _mm_slli_epi32(T, 7));
                X3 = _mm_xor_si128(X3, _mm_srli_epi32(T, 25));
                T = _mm_add_epi32(X3, X0);
                X2 = _mm_xor_si128(X2, _mm_slli_epi32(T, 9));
                X2 = _mm_xor_si128(X2, _mm_srli_epi32(T, 23));
                T = _mm_add_epi32(X2, X3);
                X1 = _mm_xor_si128(X1, _mm_slli_epi32(T, 13));
                X1 = _mm_xor_si128(X1, _mm_srli_epi32(T, 19));
                T = _mm_add_epi32(X1, X2);
                X0 = _mm_xor_si128(X0, _mm_slli_epi32(T, 18));
                X0 = _mm_xor_si128(X0, _mm_srli_epi32(T, 14));

                /* Rearrange data. */
                X1 = _mm_shuffle_epi32(X1, 0x39);
                X2 = _mm_shuffle_epi32(X2, 0x4E);
                X3 = _mm_shuffle_epi32(X3, 0x93);
        }

        B[0] = _mm_add_epi32(B[0], X0);
        B[1] = _mm_add_epi32(B[1], X1);
        B[2] = _mm_add_epi32(B[2], X2);
        B[3] = _mm_add_epi32(B[3], X3);
}
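
/*
 * For reference, a scalar formulation of the same salsa20/8 core, in the
 * style of the scalar scrypt code.  This illustrative routine is not called
 * anywhere in this file.  Note that it acts on the canonical Salsa20 word
 * order, whereas the SSE2 routine above operates on the diagonally permuted
 * layout that smix() sets up; that permutation is what lets the SSE2 code
 * replace per-word index arithmetic with whole-register shuffles.
 */
static void
salsa20_8_scalar(uint32_t B[16])
{
        uint32_t x[16];
        size_t i;

        memcpy(x, B, 64);
#define R(a,b) (((a) << (b)) | ((a) >> (32 - (b))))
        for (i = 0; i < 8; i += 2) {
                /* Operate on columns. */
                x[ 4] ^= R(x[ 0]+x[12], 7);  x[ 8] ^= R(x[ 4]+x[ 0], 9);
                x[12] ^= R(x[ 8]+x[ 4],13);  x[ 0] ^= R(x[12]+x[ 8],18);

                x[ 9] ^= R(x[ 5]+x[ 1], 7);  x[13] ^= R(x[ 9]+x[ 5], 9);
                x[ 1] ^= R(x[13]+x[ 9],13);  x[ 5] ^= R(x[ 1]+x[13],18);

                x[14] ^= R(x[10]+x[ 6], 7);  x[ 2] ^= R(x[14]+x[10], 9);
                x[ 6] ^= R(x[ 2]+x[14],13);  x[10] ^= R(x[ 6]+x[ 2],18);

                x[ 3] ^= R(x[15]+x[11], 7);  x[ 7] ^= R(x[ 3]+x[15], 9);
                x[11] ^= R(x[ 7]+x[ 3],13);  x[15] ^= R(x[11]+x[ 7],18);

                /* Operate on rows. */
                x[ 1] ^= R(x[ 0]+x[ 3], 7);  x[ 2] ^= R(x[ 1]+x[ 0], 9);
                x[ 3] ^= R(x[ 2]+x[ 1],13);  x[ 0] ^= R(x[ 3]+x[ 2],18);

                x[ 6] ^= R(x[ 5]+x[ 4], 7);  x[ 7] ^= R(x[ 6]+x[ 5], 9);
                x[ 4] ^= R(x[ 7]+x[ 6],13);  x[ 5] ^= R(x[ 4]+x[ 7],18);

                x[11] ^= R(x[10]+x[ 9], 7);  x[ 8] ^= R(x[11]+x[10], 9);
                x[ 9] ^= R(x[ 8]+x[11],13);  x[10] ^= R(x[ 9]+x[ 8],18);

                x[12] ^= R(x[15]+x[14], 7);  x[13] ^= R(x[12]+x[15], 9);
                x[14] ^= R(x[13]+x[12],13);  x[15] ^= R(x[14]+x[13],18);
        }
#undef R
        for (i = 0; i < 16; i++)
                B[i] += x[i];
}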

/**
 * blockmix_salsa8(Bin, Bout, X, r):
 * Compute Bout = BlockMix_{salsa20/8, r}(Bin). The input Bin must be 128r
 * bytes in length; the output Bout must also be the same size. The
 * temporary space X must be 64 bytes.
 */
static void
blockmix_salsa8(__m128i * Bin, __m128i * Bout, __m128i * X, size_t r)
{
        size_t i;

        /* 1: X <-- B_{2r - 1} */
        blkcpy(X, &Bin[8 * r - 4], 64);

        /* 2: for i = 0 to 2r - 1 do */
        for (i = 0; i < r; i++) {
                /* 3: X <-- H(X \xor B_i) */
                blkxor(X, &Bin[i * 8], 64);
                salsa20_8(X);

                /* 4: Y_i <-- X */
                /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
                blkcpy(&Bout[i * 4], X, 64);

                /* 3: X <-- H(X \xor B_i) */
                blkxor(X, &Bin[i * 8 + 4], 64);
                salsa20_8(X);

                /* 4: Y_i <-- X */
                /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
                blkcpy(&Bout[(r + i) * 4], X, 64);
        }
}

/**
 * integerify(B, r):
 * Return the result of parsing B_{2r-1} as a little-endian integer.
 */
static uint64_t
integerify(void * B, size_t r)
{
        uint32_t * X = (void *)((uintptr_t)(B) + (2 * r - 1) * 64);

        return (((uint64_t)(X[13]) << 32) + X[0]);
}
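
/*
 * Layout note (illustrative; not in the original comments): smix() below
 * stores word (i * 5) mod 16 of each 64-byte block at position i, so
 * position 0 holds word 0 and position 13 holds word 1 of B_{2r-1}.
 * The expression above therefore reassembles the first 8 bytes of B_{2r-1}
 * as a little-endian 64-bit integer, matching the Integerify step of the
 * scrypt algorithm.
 */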

/**
 * smix(B, r, N, V, XY):
 * Compute B = SMix_r(B, N). The input B must be 128r bytes in length;
 * the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 256r + 64 bytes in length. The value N must be a
 * power of 2 greater than 1. The arrays B, V, and XY must be aligned to a
 * multiple of 64 bytes.
 */
static void
smix(uint8_t * B, size_t r, uint64_t N, void * V, void * XY)
{
        __m128i * X = XY;
        __m128i * Y = (void *)((uintptr_t)(XY) + 128 * r);
        __m128i * Z = (void *)((uintptr_t)(XY) + 256 * r);
        uint32_t * X32 = (void *)X;
        uint64_t i, j;
        size_t k;

        /* 1: X <-- B */
        for (k = 0; k < 2 * r; k++) {
                for (i = 0; i < 16; i++) {
                        X32[k * 16 + i] =
                            le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
                }
        }

        /* 2: for i = 0 to N - 1 do */
        for (i = 0; i < N; i += 2) {
                /* 3: V_i <-- X */
                blkcpy((void *)((uintptr_t)(V) + i * 128 * r), X, 128 * r);

                /* 4: X <-- H(X) */
                blockmix_salsa8(X, Y, Z, r);

                /* 3: V_i <-- X */
                blkcpy((void *)((uintptr_t)(V) + (i + 1) * 128 * r),
                    Y, 128 * r);

                /* 4: X <-- H(X) */
                blockmix_salsa8(Y, X, Z, r);
        }

        /* 6: for i = 0 to N - 1 do */
        for (i = 0; i < N; i += 2) {
                /* 7: j <-- Integerify(X) mod N */
                j = integerify(X, r) & (N - 1);

                /* 8: X <-- H(X \xor V_j) */
                blkxor(X, (void *)((uintptr_t)(V) + j * 128 * r), 128 * r);
                blockmix_salsa8(X, Y, Z, r);

                /* 7: j <-- Integerify(X) mod N */
                j = integerify(Y, r) & (N - 1);

                /* 8: X <-- H(X \xor V_j) */
                blkxor(Y, (void *)((uintptr_t)(V) + j * 128 * r), 128 * r);
                blockmix_salsa8(Y, X, Z, r);
        }

        /* 10: B' <-- X */
        for (k = 0; k < 2 * r; k++) {
                for (i = 0; i < 16; i++) {
                        le32enc(&B[(k * 16 + (i * 5 % 16)) * 4],
                            X32[k * 16 + i]);
                }
        }
}
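
/*
 * Worked example (illustrative): with the commonly suggested interactive
 * parameters N = 16384 and r = 8, the storage sizes required above are
 * V = 128 * r * N = 128 * 8 * 16384 bytes = 16 MiB and
 * XY = 256 * r + 64 = 2112 bytes, while each B_i handed in by
 * crypto_scrypt() below is 128 * r = 1024 bytes.
 */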

/**
 * crypto_scrypt(passwd, passwdlen, salt, saltlen, N, r, p, buf, buflen):
 * Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
 * p, buflen) and write the result into buf. The parameters r, p, and buflen
 * must satisfy r * p < 2^30 and buflen <= (2^32 - 1) * 32. The parameter N
 * must be a power of 2 greater than 1.
 *
 * Return 0 on success; or -1 on error.
 */
int
crypto_scrypt(const uint8_t * passwd, size_t passwdlen,
    const uint8_t * salt, size_t saltlen, uint64_t N, uint32_t r, uint32_t p,
    uint8_t * buf, size_t buflen)
{
        void * B0, * V0, * XY0;
        uint8_t * B;
        uint32_t * V;
        uint32_t * XY;
        uint32_t i;

        /* Sanity-check parameters. */
#if SIZE_MAX > UINT32_MAX
        if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
                errno = EFBIG;
                goto err0;
        }
#endif
        if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
                errno = EFBIG;
                goto err0;
        }
        if (((N & (N - 1)) != 0) || (N == 0)) {
                errno = EINVAL;
                goto err0;
        }
        if ((r > SIZE_MAX / 128 / p) ||
#if SIZE_MAX / 256 <= UINT32_MAX
            (r > (SIZE_MAX - 64) / 256) ||
#endif
            (N > SIZE_MAX / 128 / r)) {
                errno = ENOMEM;
                goto err0;
        }

        /* Allocate memory. */
#ifdef HAVE_POSIX_MEMALIGN
        if ((errno = posix_memalign(&B0, 64, 128 * r * p)) != 0)
                goto err0;
        B = (uint8_t *)(B0);
        if ((errno = posix_memalign(&XY0, 64, 256 * r + 64)) != 0)
                goto err1;
        XY = (uint32_t *)(XY0);
#ifndef MAP_ANON
        if ((errno = posix_memalign(&V0, 64, 128 * r * N)) != 0)
                goto err2;
        V = (uint32_t *)(V0);
#endif
#else
        if ((B0 = malloc(128 * r * p + 63)) == NULL)
                goto err0;
        B = (uint8_t *)(((uintptr_t)(B0) + 63) & ~ (uintptr_t)(63));
        if ((XY0 = malloc(256 * r + 64 + 63)) == NULL)
                goto err1;
        XY = (uint32_t *)(((uintptr_t)(XY0) + 63) & ~ (uintptr_t)(63));
#ifndef MAP_ANON
        if ((V0 = malloc(128 * r * N + 63)) == NULL)
                goto err2;
        V = (uint32_t *)(((uintptr_t)(V0) + 63) & ~ (uintptr_t)(63));
#endif
#endif
#ifdef MAP_ANON
        if ((V0 = mmap(NULL, 128 * r * N, PROT_READ | PROT_WRITE,
#ifdef MAP_NOCORE
            MAP_ANON | MAP_PRIVATE | MAP_NOCORE,
#else
            MAP_ANON | MAP_PRIVATE,
#endif
            -1, 0)) == MAP_FAILED)
                goto err2;
        V = (uint32_t *)(V0);
#endif

        /* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
        PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, p * 128 * r);

        /* 2: for i = 0 to p - 1 do */
        for (i = 0; i < p; i++) {
                /* 3: B_i <-- MF(B_i, N) */
                smix(&B[i * 128 * r], r, N, V, XY);
        }

        /* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
        PBKDF2_SHA256(passwd, passwdlen, B, p * 128 * r, 1, buf, buflen);

        /* Free memory. */
#ifdef MAP_ANON
        if (munmap(V0, 128 * r * N))
                goto err2;
#else
        free(V0);
#endif
        free(XY0);
        free(B0);

        /* Success! */
        return (0);

err2:
        free(XY0);
err1:
        free(B0);
err0:
        /* Failure! */
        return (-1);
}
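
/*
 * Usage sketch (illustrative; not part of the original file).  The program
 * below shows how a separate source file could call crypto_scrypt() to
 * derive a 64-byte key with the interactive-strength cost parameters
 * N = 16384, r = 8, p = 1.  Only the crypto_scrypt() prototype comes from
 * this file; the example password, salt, and output handling are invented
 * for illustration, and a real application would use a random per-user salt.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "crypto_scrypt.h"

int
main(void)
{
        const char * passwd = "example password";
        const char * salt = "example salt";
        uint8_t dk[64];
        size_t i;

        /* Derive a 64-byte key; crypto_scrypt() sets errno on failure. */
        if (crypto_scrypt((const uint8_t *)passwd, strlen(passwd),
            (const uint8_t *)salt, strlen(salt),
            16384, 8, 1, dk, sizeof(dk)) != 0) {
                perror("crypto_scrypt");
                return (1);
        }

        /* Print the derived key as hex. */
        for (i = 0; i < sizeof(dk); i++)
                printf("%02x", dk[i]);
        printf("\n");
        return (0);
}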