Index: net/third_party/nss/patches/tls-srp.patch |
diff --git a/net/third_party/nss/patches/tls-srp.patch b/net/third_party/nss/patches/tls-srp.patch |
new file mode 100644 |
index 0000000000000000000000000000000000000000..2095a5c79f9261e6f0132e61d853c7e481ef7d0d |
--- /dev/null |
+++ b/net/third_party/nss/patches/tls-srp.patch |
@@ -0,0 +1,11090 @@ |
+diff --git a/net/third_party/nss/ssl/mpi/logtab.h b/net/third_party/nss/ssl/mpi/logtab.h |
+new file mode 100644 |
+index 0000000..41badfc |
+--- /dev/null |
++++ b/net/third_party/nss/ssl/mpi/logtab.h |
+@@ -0,0 +1,62 @@ |
++/* |
++ * logtab.h |
++ * |
++ * Arbitrary precision integer arithmetic library |
++ * |
++ * ***** BEGIN LICENSE BLOCK ***** |
++ * Version: MPL 1.1/GPL 2.0/LGPL 2.1 |
++ * |
++ * The contents of this file are subject to the Mozilla Public License Version |
++ * 1.1 (the "License"); you may not use this file except in compliance with |
++ * the License. You may obtain a copy of the License at |
++ * http://www.mozilla.org/MPL/ |
++ * |
++ * Software distributed under the License is distributed on an "AS IS" basis, |
++ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License |
++ * for the specific language governing rights and limitations under the |
++ * License. |
++ * |
++ * The Original Code is the MPI Arbitrary Precision Integer Arithmetic library. |
++ * |
++ * The Initial Developer of the Original Code is |
++ * Michael J. Fromberger. |
++ * Portions created by the Initial Developer are Copyright (C) 1998 |
++ * the Initial Developer. All Rights Reserved. |
++ * |
++ * Contributor(s): |
++ * |
++ * Alternatively, the contents of this file may be used under the terms of |
++ * either the GNU General Public License Version 2 or later (the "GPL"), or |
++ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), |
++ * in which case the provisions of the GPL or the LGPL are applicable instead |
++ * of those above. If you wish to allow use of your version of this file only |
++ * under the terms of either the GPL or the LGPL, and not to allow others to |
++ * use your version of this file under the terms of the MPL, indicate your |
++ * decision by deleting the provisions above and replace them with the notice |
++ * and other provisions required by the GPL or the LGPL. If you do not delete |
++ * the provisions above, a recipient may use your version of this file under |
++ * the terms of any one of the MPL, the GPL or the LGPL. |
++ * |
++ * ***** END LICENSE BLOCK ***** */ |
++/* $Id: logtab.h,v 1.5 2004/04/27 23:04:36 gerv%gerv.net Exp $ */ |
++ |
++const float s_logv_2[] = { |
++ 0.000000000f, 0.000000000f, 1.000000000f, 0.630929754f, /* 0 1 2 3 */ |
++ 0.500000000f, 0.430676558f, 0.386852807f, 0.356207187f, /* 4 5 6 7 */ |
++ 0.333333333f, 0.315464877f, 0.301029996f, 0.289064826f, /* 8 9 10 11 */ |
++ 0.278942946f, 0.270238154f, 0.262649535f, 0.255958025f, /* 12 13 14 15 */ |
++ 0.250000000f, 0.244650542f, 0.239812467f, 0.235408913f, /* 16 17 18 19 */ |
++ 0.231378213f, 0.227670249f, 0.224243824f, 0.221064729f, /* 20 21 22 23 */ |
++ 0.218104292f, 0.215338279f, 0.212746054f, 0.210309918f, /* 24 25 26 27 */ |
++ 0.208014598f, 0.205846832f, 0.203795047f, 0.201849087f, /* 28 29 30 31 */ |
++ 0.200000000f, 0.198239863f, 0.196561632f, 0.194959022f, /* 32 33 34 35 */ |
++ 0.193426404f, 0.191958720f, 0.190551412f, 0.189200360f, /* 36 37 38 39 */ |
++ 0.187901825f, 0.186652411f, 0.185449023f, 0.184288833f, /* 40 41 42 43 */ |
++ 0.183169251f, 0.182087900f, 0.181042597f, 0.180031327f, /* 44 45 46 47 */ |
++ 0.179052232f, 0.178103594f, 0.177183820f, 0.176291434f, /* 48 49 50 51 */ |
++ 0.175425064f, 0.174583430f, 0.173765343f, 0.172969690f, /* 52 53 54 55 */ |
++ 0.172195434f, 0.171441601f, 0.170707280f, 0.169991616f, /* 56 57 58 59 */ |
++ 0.169293808f, 0.168613099f, 0.167948779f, 0.167300179f, /* 60 61 62 63 */ |
++ 0.166666667f |
++}; |
++ |
+diff --git a/net/third_party/nss/ssl/mpi/mpcpucache.c b/net/third_party/nss/ssl/mpi/mpcpucache.c |
+new file mode 100644 |
+index 0000000..6efa072 |
+--- /dev/null |
++++ b/net/third_party/nss/ssl/mpi/mpcpucache.c |
+@@ -0,0 +1,838 @@ |
++/* ***** BEGIN LICENSE BLOCK ***** |
++ * Version: MPL 1.1/GPL 2.0/LGPL 2.1 |
++ * |
++ * The contents of this file are subject to the Mozilla Public License Version |
++ * 1.1 (the "License"); you may not use this file except in compliance with |
++ * the License. You may obtain a copy of the License at |
++ * http://www.mozilla.org/MPL/ |
++ * |
++ * Software distributed under the License is distributed on an "AS IS" basis, |
++ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License |
++ * for the specific language governing rights and limitations under the |
++ * License. |
++ * |
++ * The Original Code is the Netscape security libraries. |
++ * |
++ * The Initial Developer of the Original Code is |
++ * Red Hat, Inc |
++ * Portions created by the Initial Developer are Copyright (C) 2005 |
++ * the Initial Developer. All Rights Reserved. |
++ * |
++ * Contributor(s): |
++ * Robert Relyea <rrelyea@redhat.com> |
++ * |
++ * Alternatively, the contents of this file may be used under the terms of |
++ * either the GNU General Public License Version 2 or later (the "GPL"), or |
++ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), |
++ * in which case the provisions of the GPL or the LGPL are applicable instead |
++ * of those above. If you wish to allow use of your version of this file only |
++ * under the terms of either the GPL or the LGPL, and not to allow others to |
++ * use your version of this file under the terms of the MPL, indicate your |
++ * decision by deleting the provisions above and replace them with the notice |
++ * and other provisions required by the GPL or the LGPL. If you do not delete |
++ * the provisions above, a recipient may use your version of this file under |
++ * the terms of any one of the MPL, the GPL or the LGPL. |
++ * |
++ * ***** END LICENSE BLOCK ***** */ |
++ |
++#include "mpi.h" |
++ |
++/* |
++ * This file implements a single function: s_mpi_getProcessorLineSize(); |
++ * s_mpi_getProcessorLineSize() returns the size in bytes of the cache line |
++ * if a cache exists, or zero if there is no cache. If more than one |
++ * cache line exists, it should return the smallest line size (which is |
++ * usually the L1 cache). |
++ * |
++ * mp_modexp uses this information to make sure that private key information |
++ * isn't being leaked through the cache. |
++ * |
++ * Currently the file returns good data for most modern x86 processors, and |
++ * reasonable data on 64-bit ppc processors. All other processors are assumed |
++ * to have a cache line size of 32 bytes unless modified by target.mk. |
++ * |
++ */ |
++ |
++#if defined(i386) || defined(__i386) || defined(__X86__) || defined (_M_IX86) || defined(__x86_64__) || defined(__x86_64) || defined(_M_AMD64) |
++/* X86 processors have special instructions that tell us about the cache */ |
++#include "string.h" |
++ |
++#if defined(__x86_64__) || defined(__x86_64) || defined(_M_AMD64) |
++#define AMD_64 1 |
++#endif |
++ |
++/* Generic CPUID function */ |
++#if defined(AMD_64) |
++ |
++#if defined(__GNUC__) |
++ |
++void freebl_cpuid(unsigned long op, unsigned long *eax, |
++ unsigned long *ebx, unsigned long *ecx, |
++ unsigned long *edx) |
++{ |
++ __asm__("cpuid\n\t" |
++ : "=a" (*eax), |
++ "=b" (*ebx), |
++ "=c" (*ecx), |
++ "=d" (*edx) |
++ : "0" (op)); |
++} |
++ |
++#elif defined(_MSC_VER) |
++ |
++#include <intrin.h> |
++ |
++void freebl_cpuid(unsigned long op, unsigned long *eax, |
++ unsigned long *ebx, unsigned long *ecx, |
++ unsigned long *edx) |
++{ |
++ int intrinsic_out[4]; |
++ |
++ __cpuid(intrinsic_out, op); |
++ *eax = intrinsic_out[0]; |
++ *ebx = intrinsic_out[1]; |
++ *ecx = intrinsic_out[2]; |
++ *edx = intrinsic_out[3]; |
++} |
++ |
++#endif |
++ |
++#else /* !defined(AMD_64) */ |
++ |
++/* x86 */ |
++ |
++#if defined(__GNUC__) |
++void freebl_cpuid(unsigned long op, unsigned long *eax, |
++ unsigned long *ebx, unsigned long *ecx, |
++ unsigned long *edx) |
++{ |
++/* sigh, GCC isn't smart enough to save the ebx PIC register on its own |
++ * in this case, so do it by hand. */ |
++ __asm__("pushl %%ebx\n\t" |
++ "cpuid\n\t" |
++ "mov %%ebx,%1\n\t" |
++ "popl %%ebx\n\t" |
++ : "=a" (*eax), |
++ "=r" (*ebx), |
++ "=c" (*ecx), |
++ "=d" (*edx) |
++ : "0" (op)); |
++} |
++ |
++/* |
++ * try flipping a processor flag to determine CPU type |
++ */ |
++static unsigned long changeFlag(unsigned long flag) |
++{ |
++ unsigned long changedFlags, originalFlags; |
++ __asm__("pushfl\n\t" /* get the flags */ |
++ "popl %0\n\t" |
++ "movl %0,%1\n\t" /* save the original flags */ |
++ "xorl %2,%0\n\t" /* flip the bit */ |
++ "pushl %0\n\t" /* set the flags */ |
++ "popfl\n\t" |
++ "pushfl\n\t" /* get the flags again (for return) */ |
++ "popl %0\n\t" |
++ "pushl %1\n\t" /* restore the original flags */ |
++ "popfl\n\t" |
++ : "=r" (changedFlags), |
++ "=r" (originalFlags), |
++ "=r" (flag) |
++ : "2" (flag)); |
++ return changedFlags ^ originalFlags; |
++} |
++ |
++#elif defined(_MSC_VER) |
++ |
++/* |
++ * windows versions of the above assembler |
++ */ |
++#define wcpuid __asm __emit 0fh __asm __emit 0a2h |
++void freebl_cpuid(unsigned long op, unsigned long *Reax, |
++ unsigned long *Rebx, unsigned long *Recx, unsigned long *Redx) |
++{ |
++ unsigned long Leax, Lebx, Lecx, Ledx; |
++ __asm { |
++ pushad |
++ mov eax,op |
++ wcpuid |
++ mov Leax,eax |
++ mov Lebx,ebx |
++ mov Lecx,ecx |
++ mov Ledx,edx |
++ popad |
++ } |
++ *Reax = Leax; |
++ *Rebx = Lebx; |
++ *Recx = Lecx; |
++ *Redx = Ledx; |
++} |
++ |
++static unsigned long changeFlag(unsigned long flag) |
++{ |
++ unsigned long changedFlags, originalFlags; |
++ __asm { |
++ push eax |
++ push ebx |
++ pushfd /* get the flags */ |
++ pop eax |
++ push eax /* save the flags on the stack */ |
++ mov originalFlags,eax /* save the original flags */ |
++ mov ebx,flag |
++ xor eax,ebx /* flip the bit */ |
++ push eax /* set the flags */ |
++ popfd |
++ pushfd /* get the flags again (for return) */ |
++ pop eax |
++ popfd /* restore the original flags */ |
++ mov changedFlags,eax |
++ pop ebx |
++ pop eax |
++ } |
++ return changedFlags ^ originalFlags; |
++} |
++#endif |
++ |
++#endif |
++ |
++#if !defined(AMD_64) |
++#define AC_FLAG 0x40000 |
++#define ID_FLAG 0x200000 |
++ |
++/* 386 processors can't flip the AC_FLAG, intel AP Note AP-485 */ |
++static int is386() |
++{ |
++ return changeFlag(AC_FLAG) == 0; |
++} |
++ |
++/* 486 processors can't flip the ID_FLAG, intel AP Note AP-485 */ |
++static int is486() |
++{ |
++ return changeFlag(ID_FLAG) == 0; |
++} |
++#endif |
++ |
++ |
++/* |
++ * table for Intel Cache. |
++ * See Intel Application Note AP-485 for more information |
++ */ |
++ |
++typedef unsigned char CacheTypeEntry; |
++ |
++typedef enum { |
++ Cache_NONE = 0, |
++ Cache_UNKNOWN = 1, |
++ Cache_TLB = 2, |
++ Cache_TLBi = 3, |
++ Cache_TLBd = 4, |
++ Cache_Trace = 5, |
++ Cache_L1 = 6, |
++ Cache_L1i = 7, |
++ Cache_L1d = 8, |
++ Cache_L2 = 9 , |
++ Cache_L2i = 10 , |
++ Cache_L2d = 11 , |
++ Cache_L3 = 12 , |
++ Cache_L3i = 13, |
++ Cache_L3d = 14 |
++} CacheType; |
++ |
++struct _cache { |
++ CacheTypeEntry type; |
++ unsigned char lineSize; |
++}; |
++static const struct _cache CacheMap[256] = { |
++/* 00 */ {Cache_NONE, 0 }, |
++/* 01 */ {Cache_TLBi, 0 }, |
++/* 02 */ {Cache_TLBi, 0 }, |
++/* 03 */ {Cache_TLBd, 0 }, |
++/* 04 */ {Cache_TLBd, 0 }, |
++/* 05 */ {Cache_UNKNOWN, 0 }, |
++/* 06 */ {Cache_L1i, 32 }, |
++/* 07 */ {Cache_UNKNOWN, 0 }, |
++/* 08 */ {Cache_L1i, 32 }, |
++/* 09 */ {Cache_UNKNOWN, 0 }, |
++/* 0a */ {Cache_L1d, 32 }, |
++/* 0b */ {Cache_UNKNOWN, 0 }, |
++/* 0c */ {Cache_L1d, 32 }, |
++/* 0d */ {Cache_UNKNOWN, 0 }, |
++/* 0e */ {Cache_UNKNOWN, 0 }, |
++/* 0f */ {Cache_UNKNOWN, 0 }, |
++/* 10 */ {Cache_UNKNOWN, 0 }, |
++/* 11 */ {Cache_UNKNOWN, 0 }, |
++/* 12 */ {Cache_UNKNOWN, 0 }, |
++/* 13 */ {Cache_UNKNOWN, 0 }, |
++/* 14 */ {Cache_UNKNOWN, 0 }, |
++/* 15 */ {Cache_UNKNOWN, 0 }, |
++/* 16 */ {Cache_UNKNOWN, 0 }, |
++/* 17 */ {Cache_UNKNOWN, 0 }, |
++/* 18 */ {Cache_UNKNOWN, 0 }, |
++/* 19 */ {Cache_UNKNOWN, 0 }, |
++/* 1a */ {Cache_UNKNOWN, 0 }, |
++/* 1b */ {Cache_UNKNOWN, 0 }, |
++/* 1c */ {Cache_UNKNOWN, 0 }, |
++/* 1d */ {Cache_UNKNOWN, 0 }, |
++/* 1e */ {Cache_UNKNOWN, 0 }, |
++/* 1f */ {Cache_UNKNOWN, 0 }, |
++/* 20 */ {Cache_UNKNOWN, 0 }, |
++/* 21 */ {Cache_UNKNOWN, 0 }, |
++/* 22 */ {Cache_L3, 64 }, |
++/* 23 */ {Cache_L3, 64 }, |
++/* 24 */ {Cache_UNKNOWN, 0 }, |
++/* 25 */ {Cache_L3, 64 }, |
++/* 26 */ {Cache_UNKNOWN, 0 }, |
++/* 27 */ {Cache_UNKNOWN, 0 }, |
++/* 28 */ {Cache_UNKNOWN, 0 }, |
++/* 29 */ {Cache_L3, 64 }, |
++/* 2a */ {Cache_UNKNOWN, 0 }, |
++/* 2b */ {Cache_UNKNOWN, 0 }, |
++/* 2c */ {Cache_L1d, 64 }, |
++/* 2d */ {Cache_UNKNOWN, 0 }, |
++/* 2e */ {Cache_UNKNOWN, 0 }, |
++/* 2f */ {Cache_UNKNOWN, 0 }, |
++/* 30 */ {Cache_L1i, 64 }, |
++/* 31 */ {Cache_UNKNOWN, 0 }, |
++/* 32 */ {Cache_UNKNOWN, 0 }, |
++/* 33 */ {Cache_UNKNOWN, 0 }, |
++/* 34 */ {Cache_UNKNOWN, 0 }, |
++/* 35 */ {Cache_UNKNOWN, 0 }, |
++/* 36 */ {Cache_UNKNOWN, 0 }, |
++/* 37 */ {Cache_UNKNOWN, 0 }, |
++/* 38 */ {Cache_UNKNOWN, 0 }, |
++/* 39 */ {Cache_L2, 64 }, |
++/* 3a */ {Cache_UNKNOWN, 0 }, |
++/* 3b */ {Cache_L2, 64 }, |
++/* 3c */ {Cache_L2, 64 }, |
++/* 3d */ {Cache_UNKNOWN, 0 }, |
++/* 3e */ {Cache_UNKNOWN, 0 }, |
++/* 3f */ {Cache_UNKNOWN, 0 }, |
++/* 40 */ {Cache_L2, 0 }, |
++/* 41 */ {Cache_L2, 32 }, |
++/* 42 */ {Cache_L2, 32 }, |
++/* 43 */ {Cache_L2, 32 }, |
++/* 44 */ {Cache_L2, 32 }, |
++/* 45 */ {Cache_L2, 32 }, |
++/* 46 */ {Cache_UNKNOWN, 0 }, |
++/* 47 */ {Cache_UNKNOWN, 0 }, |
++/* 48 */ {Cache_UNKNOWN, 0 }, |
++/* 49 */ {Cache_UNKNOWN, 0 }, |
++/* 4a */ {Cache_UNKNOWN, 0 }, |
++/* 4b */ {Cache_UNKNOWN, 0 }, |
++/* 4c */ {Cache_UNKNOWN, 0 }, |
++/* 4d */ {Cache_UNKNOWN, 0 }, |
++/* 4e */ {Cache_UNKNOWN, 0 }, |
++/* 4f */ {Cache_UNKNOWN, 0 }, |
++/* 50 */ {Cache_TLBi, 0 }, |
++/* 51 */ {Cache_TLBi, 0 }, |
++/* 52 */ {Cache_TLBi, 0 }, |
++/* 53 */ {Cache_UNKNOWN, 0 }, |
++/* 54 */ {Cache_UNKNOWN, 0 }, |
++/* 55 */ {Cache_UNKNOWN, 0 }, |
++/* 56 */ {Cache_UNKNOWN, 0 }, |
++/* 57 */ {Cache_UNKNOWN, 0 }, |
++/* 58 */ {Cache_UNKNOWN, 0 }, |
++/* 59 */ {Cache_UNKNOWN, 0 }, |
++/* 5a */ {Cache_UNKNOWN, 0 }, |
++/* 5b */ {Cache_TLBd, 0 }, |
++/* 5c */ {Cache_TLBd, 0 }, |
++/* 5d */ {Cache_TLBd, 0 }, |
++/* 5e */ {Cache_UNKNOWN, 0 }, |
++/* 5f */ {Cache_UNKNOWN, 0 }, |
++/* 60 */ {Cache_UNKNOWN, 0 }, |
++/* 61 */ {Cache_UNKNOWN, 0 }, |
++/* 62 */ {Cache_UNKNOWN, 0 }, |
++/* 63 */ {Cache_UNKNOWN, 0 }, |
++/* 64 */ {Cache_UNKNOWN, 0 }, |
++/* 65 */ {Cache_UNKNOWN, 0 }, |
++/* 66 */ {Cache_L1d, 64 }, |
++/* 67 */ {Cache_L1d, 64 }, |
++/* 68 */ {Cache_L1d, 64 }, |
++/* 69 */ {Cache_UNKNOWN, 0 }, |
++/* 6a */ {Cache_UNKNOWN, 0 }, |
++/* 6b */ {Cache_UNKNOWN, 0 }, |
++/* 6c */ {Cache_UNKNOWN, 0 }, |
++/* 6d */ {Cache_UNKNOWN, 0 }, |
++/* 6e */ {Cache_UNKNOWN, 0 }, |
++/* 6f */ {Cache_UNKNOWN, 0 }, |
++/* 70 */ {Cache_Trace, 1 }, |
++/* 71 */ {Cache_Trace, 1 }, |
++/* 72 */ {Cache_Trace, 1 }, |
++/* 73 */ {Cache_UNKNOWN, 0 }, |
++/* 74 */ {Cache_UNKNOWN, 0 }, |
++/* 75 */ {Cache_UNKNOWN, 0 }, |
++/* 76 */ {Cache_UNKNOWN, 0 }, |
++/* 77 */ {Cache_UNKNOWN, 0 }, |
++/* 78 */ {Cache_UNKNOWN, 0 }, |
++/* 79 */ {Cache_L2, 64 }, |
++/* 7a */ {Cache_L2, 64 }, |
++/* 7b */ {Cache_L2, 64 }, |
++/* 7c */ {Cache_L2, 64 }, |
++/* 7d */ {Cache_UNKNOWN, 0 }, |
++/* 7e */ {Cache_UNKNOWN, 0 }, |
++/* 7f */ {Cache_UNKNOWN, 0 }, |
++/* 80 */ {Cache_UNKNOWN, 0 }, |
++/* 81 */ {Cache_UNKNOWN, 0 }, |
++/* 82 */ {Cache_L2, 32 }, |
++/* 83 */ {Cache_L2, 32 }, |
++/* 84 */ {Cache_L2, 32 }, |
++/* 85 */ {Cache_L2, 32 }, |
++/* 86 */ {Cache_L2, 64 }, |
++/* 87 */ {Cache_L2, 64 }, |
++/* 88 */ {Cache_UNKNOWN, 0 }, |
++/* 89 */ {Cache_UNKNOWN, 0 }, |
++/* 8a */ {Cache_UNKNOWN, 0 }, |
++/* 8b */ {Cache_UNKNOWN, 0 }, |
++/* 8c */ {Cache_UNKNOWN, 0 }, |
++/* 8d */ {Cache_UNKNOWN, 0 }, |
++/* 8e */ {Cache_UNKNOWN, 0 }, |
++/* 8f */ {Cache_UNKNOWN, 0 }, |
++/* 90 */ {Cache_UNKNOWN, 0 }, |
++/* 91 */ {Cache_UNKNOWN, 0 }, |
++/* 92 */ {Cache_UNKNOWN, 0 }, |
++/* 93 */ {Cache_UNKNOWN, 0 }, |
++/* 94 */ {Cache_UNKNOWN, 0 }, |
++/* 95 */ {Cache_UNKNOWN, 0 }, |
++/* 96 */ {Cache_UNKNOWN, 0 }, |
++/* 97 */ {Cache_UNKNOWN, 0 }, |
++/* 98 */ {Cache_UNKNOWN, 0 }, |
++/* 99 */ {Cache_UNKNOWN, 0 }, |
++/* 9a */ {Cache_UNKNOWN, 0 }, |
++/* 9b */ {Cache_UNKNOWN, 0 }, |
++/* 9c */ {Cache_UNKNOWN, 0 }, |
++/* 9d */ {Cache_UNKNOWN, 0 }, |
++/* 9e */ {Cache_UNKNOWN, 0 }, |
++/* 9f */ {Cache_UNKNOWN, 0 }, |
++/* a0 */ {Cache_UNKNOWN, 0 }, |
++/* a1 */ {Cache_UNKNOWN, 0 }, |
++/* a2 */ {Cache_UNKNOWN, 0 }, |
++/* a3 */ {Cache_UNKNOWN, 0 }, |
++/* a4 */ {Cache_UNKNOWN, 0 }, |
++/* a5 */ {Cache_UNKNOWN, 0 }, |
++/* a6 */ {Cache_UNKNOWN, 0 }, |
++/* a7 */ {Cache_UNKNOWN, 0 }, |
++/* a8 */ {Cache_UNKNOWN, 0 }, |
++/* a9 */ {Cache_UNKNOWN, 0 }, |
++/* aa */ {Cache_UNKNOWN, 0 }, |
++/* ab */ {Cache_UNKNOWN, 0 }, |
++/* ac */ {Cache_UNKNOWN, 0 }, |
++/* ad */ {Cache_UNKNOWN, 0 }, |
++/* ae */ {Cache_UNKNOWN, 0 }, |
++/* af */ {Cache_UNKNOWN, 0 }, |
++/* b0 */ {Cache_TLBi, 0 }, |
++/* b1 */ {Cache_UNKNOWN, 0 }, |
++/* b2 */ {Cache_UNKNOWN, 0 }, |
++/* b3 */ {Cache_TLBd, 0 }, |
++/* b4 */ {Cache_UNKNOWN, 0 }, |
++/* b5 */ {Cache_UNKNOWN, 0 }, |
++/* b6 */ {Cache_UNKNOWN, 0 }, |
++/* b7 */ {Cache_UNKNOWN, 0 }, |
++/* b8 */ {Cache_UNKNOWN, 0 }, |
++/* b9 */ {Cache_UNKNOWN, 0 }, |
++/* ba */ {Cache_UNKNOWN, 0 }, |
++/* bb */ {Cache_UNKNOWN, 0 }, |
++/* bc */ {Cache_UNKNOWN, 0 }, |
++/* bd */ {Cache_UNKNOWN, 0 }, |
++/* be */ {Cache_UNKNOWN, 0 }, |
++/* bf */ {Cache_UNKNOWN, 0 }, |
++/* c0 */ {Cache_UNKNOWN, 0 }, |
++/* c1 */ {Cache_UNKNOWN, 0 }, |
++/* c2 */ {Cache_UNKNOWN, 0 }, |
++/* c3 */ {Cache_UNKNOWN, 0 }, |
++/* c4 */ {Cache_UNKNOWN, 0 }, |
++/* c5 */ {Cache_UNKNOWN, 0 }, |
++/* c6 */ {Cache_UNKNOWN, 0 }, |
++/* c7 */ {Cache_UNKNOWN, 0 }, |
++/* c8 */ {Cache_UNKNOWN, 0 }, |
++/* c9 */ {Cache_UNKNOWN, 0 }, |
++/* ca */ {Cache_UNKNOWN, 0 }, |
++/* cb */ {Cache_UNKNOWN, 0 }, |
++/* cc */ {Cache_UNKNOWN, 0 }, |
++/* cd */ {Cache_UNKNOWN, 0 }, |
++/* ce */ {Cache_UNKNOWN, 0 }, |
++/* cf */ {Cache_UNKNOWN, 0 }, |
++/* d0 */ {Cache_UNKNOWN, 0 }, |
++/* d1 */ {Cache_UNKNOWN, 0 }, |
++/* d2 */ {Cache_UNKNOWN, 0 }, |
++/* d3 */ {Cache_UNKNOWN, 0 }, |
++/* d4 */ {Cache_UNKNOWN, 0 }, |
++/* d5 */ {Cache_UNKNOWN, 0 }, |
++/* d6 */ {Cache_UNKNOWN, 0 }, |
++/* d7 */ {Cache_UNKNOWN, 0 }, |
++/* d8 */ {Cache_UNKNOWN, 0 }, |
++/* d9 */ {Cache_UNKNOWN, 0 }, |
++/* da */ {Cache_UNKNOWN, 0 }, |
++/* db */ {Cache_UNKNOWN, 0 }, |
++/* dc */ {Cache_UNKNOWN, 0 }, |
++/* dd */ {Cache_UNKNOWN, 0 }, |
++/* de */ {Cache_UNKNOWN, 0 }, |
++/* df */ {Cache_UNKNOWN, 0 }, |
++/* e0 */ {Cache_UNKNOWN, 0 }, |
++/* e1 */ {Cache_UNKNOWN, 0 }, |
++/* e2 */ {Cache_UNKNOWN, 0 }, |
++/* e3 */ {Cache_UNKNOWN, 0 }, |
++/* e4 */ {Cache_UNKNOWN, 0 }, |
++/* e5 */ {Cache_UNKNOWN, 0 }, |
++/* e6 */ {Cache_UNKNOWN, 0 }, |
++/* e7 */ {Cache_UNKNOWN, 0 }, |
++/* e8 */ {Cache_UNKNOWN, 0 }, |
++/* e9 */ {Cache_UNKNOWN, 0 }, |
++/* ea */ {Cache_UNKNOWN, 0 }, |
++/* eb */ {Cache_UNKNOWN, 0 }, |
++/* ec */ {Cache_UNKNOWN, 0 }, |
++/* ed */ {Cache_UNKNOWN, 0 }, |
++/* ee */ {Cache_UNKNOWN, 0 }, |
++/* ef */ {Cache_UNKNOWN, 0 }, |
++/* f0 */ {Cache_UNKNOWN, 0 }, |
++/* f1 */ {Cache_UNKNOWN, 0 }, |
++/* f2 */ {Cache_UNKNOWN, 0 }, |
++/* f3 */ {Cache_UNKNOWN, 0 }, |
++/* f4 */ {Cache_UNKNOWN, 0 }, |
++/* f5 */ {Cache_UNKNOWN, 0 }, |
++/* f6 */ {Cache_UNKNOWN, 0 }, |
++/* f7 */ {Cache_UNKNOWN, 0 }, |
++/* f8 */ {Cache_UNKNOWN, 0 }, |
++/* f9 */ {Cache_UNKNOWN, 0 }, |
++/* fa */ {Cache_UNKNOWN, 0 }, |
++/* fb */ {Cache_UNKNOWN, 0 }, |
++/* fc */ {Cache_UNKNOWN, 0 }, |
++/* fd */ {Cache_UNKNOWN, 0 }, |
++/* fe */ {Cache_UNKNOWN, 0 }, |
++/* ff */ {Cache_UNKNOWN, 0 } |
++}; |
++ |
++ |
++/* |
++ * use the above table to determine the CacheEntryLineSize. |
++ */ |
++static void |
++getIntelCacheEntryLineSize(unsigned long val, int *level, |
++ unsigned long *lineSize) |
++{ |
++ CacheType type; |
++ |
++ type = CacheMap[val].type; |
++ /* only interested in data caches */ |
++ /* NOTE val = 0x40 is a special value that means no L2 or L3 cache. |
++ * this data check has the side effect of rejecting that entry. If |
++ * that wasn't the case, we would have to reject it explicitly */ |
++ if (CacheMap[val].lineSize == 0) { |
++ return; |
++ } |
++ /* look at the caches, skip types we aren't interested in. |
++ * if we already have a value for a lower level cache, skip the |
++ * current entry */ |
++ if ((type == Cache_L1)|| (type == Cache_L1d)) { |
++ *level = 1; |
++ *lineSize = CacheMap[val].lineSize; |
++ } else if ((*level >= 2) && ((type == Cache_L2) || (type == Cache_L2d))) { |
++ *level = 2; |
++ *lineSize = CacheMap[val].lineSize; |
++ } else if ((*level >= 3) && ((type == Cache_L3) || (type == Cache_L3d))) { |
++ *level = 3; |
++ *lineSize = CacheMap[val].lineSize; |
++ } |
++ return; |
++} |
++ |
++ |
++static void |
++getIntelRegisterCacheLineSize(unsigned long val, |
++ int *level, unsigned long *lineSize) |
++{ |
++ getIntelCacheEntryLineSize(val >> 24 & 0xff, level, lineSize); |
++ getIntelCacheEntryLineSize(val >> 16 & 0xff, level, lineSize); |
++ getIntelCacheEntryLineSize(val >> 8 & 0xff, level, lineSize); |
++ getIntelCacheEntryLineSize(val & 0xff, level, lineSize); |
++} |
++ |
++/* |
++ * returns '0' if no recognized cache is found, or if the cache |
++ * information is not supported by this processor |
++ */ |
++static unsigned long |
++getIntelCacheLineSize(int cpuidLevel) |
++{ |
++ int level = 4; |
++ unsigned long lineSize = 0; |
++ unsigned long eax, ebx, ecx, edx; |
++ int repeat, count; |
++ |
++ if (cpuidLevel < 2) { |
++ return 0; |
++ } |
++ |
++ /* command '2' of the cpuid is intel's cache info call. Each byte of the |
++ * 4 registers contains a potential descriptor for the cache. The CacheMap |
++ * table maps each descriptor to the corresponding processor cache. Register |
++ * 'al' contains the number of times cpuid '2' must be called in order to |
++ * find all the cache descriptors. Only registers with the high bit set |
++ * to 'zero' have valid descriptors. This code loops through all the |
++ * required calls to cpuid '2' and passes any valid descriptors it finds |
++ * to the getIntelRegisterCacheLineSize code, which breaks the registers |
++ * down into their component descriptors. In the end the lineSize of the |
++ * lowest level data cache is returned. */ |
++ freebl_cpuid(2, &eax, &ebx, &ecx, &edx); |
++ repeat = eax & 0xf; |
++ for (count = 0; count < repeat; count++) { |
++ if ((eax & 0x80000000) == 0) { |
++ getIntelRegisterCacheLineSize(eax & 0xffffff00, &level, &lineSize); |
++ } |
++ if ((ebx & 0x80000000) == 0) { |
++ getIntelRegisterCacheLineSize(ebx, &level, &lineSize); |
++ } |
++ if ((ecx & 0x80000000) == 0) { |
++ getIntelRegisterCacheLineSize(ecx, &level, &lineSize); |
++ } |
++ if ((edx & 0x80000000) == 0) { |
++ getIntelRegisterCacheLineSize(edx, &level, &lineSize); |
++ } |
++ if (count+1 != repeat) { |
++ freebl_cpuid(2, &eax, &ebx, &ecx, &edx); |
++ } |
++ } |
++ return lineSize; |
++} |
++ |
++/* |
++ * returns '0' if the cache info is not supported by this processor. |
++ * This is based on the AMD extended cache commands for cpuid. |
++ * (see "AMD Processor Recognition Application Note" Publication 20734). |
++ * Some other processors use the identical scheme. |
++ * (see "Processor Recognition, Transmeta Corporation"). |
++ */ |
++static unsigned long |
++getOtherCacheLineSize(unsigned long cpuidLevel) |
++{ |
++ unsigned long lineSize = 0; |
++ unsigned long eax, ebx, ecx, edx; |
++ |
++ /* get the Extended CPUID level */ |
++ freebl_cpuid(0x80000000, &eax, &ebx, &ecx, &edx); |
++ cpuidLevel = eax; |
++ |
++ if (cpuidLevel >= 0x80000005) { |
++ freebl_cpuid(0x80000005, &eax, &ebx, &ecx, &edx); |
++ lineSize = ecx & 0xff; /* line Size, L1 Data Cache */ |
++ } |
++ return lineSize; |
++} |
++ |
++static const char * const manMap[] = { |
++#define INTEL 0 |
++ "GenuineIntel", |
++#define AMD 1 |
++ "AuthenticAMD", |
++#define CYRIX 2 |
++ "CyrixInstead", |
++#define CENTAUR 2 |
++ "CentaurHauls", |
++#define NEXGEN 3 |
++ "NexGenDriven", |
++#define TRANSMETA 4 |
++ "GenuineTMx86", |
++#define RISE 5 |
++ "RiseRiseRise", |
++#define UMC 6 |
++ "UMC UMC UMC ", |
++#define SIS 7 |
++ "Sis Sis Sis ", |
++#define NATIONAL 8 |
++ "Geode by NSC", |
++}; |
++ |
++static const int n_manufacturers = sizeof(manMap)/sizeof(manMap[0]); |
++ |
++ |
++#define MAN_UNKNOWN 9 |
++ |
++#if !defined(AMD_64) |
++#define SSE2_FLAG (1<<26) |
++unsigned long |
++s_mpi_is_sse2() |
++{ |
++ unsigned long eax, ebx, ecx, edx; |
++ int manufacturer = MAN_UNKNOWN; |
++ int i; |
++ char string[13]; |
++ |
++ if (is386() || is486()) { |
++ return 0; |
++ } |
++ freebl_cpuid(0, &eax, &ebx, &ecx, &edx); |
++ *(int *)string = ebx; |
++ *(int *)&string[4] = edx; |
++ *(int *)&string[8] = ecx; |
++ string[12] = 0; |
++ |
++ /* has no SSE2 extensions */ |
++ if (eax == 0) { |
++ return 0; |
++ } |
++ |
++ for (i=0; i < n_manufacturers; i++) { |
++ if ( strcmp(manMap[i],string) == 0) { |
++ manufacturer = i; |
++ break; |
++ } |
++ } |
++ |
++ freebl_cpuid(1,&eax,&ebx,&ecx,&edx); |
++ return (edx & SSE2_FLAG) == SSE2_FLAG; |
++} |
++#endif |
++ |
++unsigned long |
++s_mpi_getProcessorLineSize() |
++{ |
++ unsigned long eax, ebx, ecx, edx; |
++ unsigned long cpuidLevel; |
++ unsigned long cacheLineSize = 0; |
++ int manufacturer = MAN_UNKNOWN; |
++ int i; |
++ char string[65]; |
++ |
++#if !defined(AMD_64) |
++ if (is386()) { |
++ return 0; /* 386 had no cache */ |
++ } if (is486()) { |
++ return 32; /* really? need more info */ |
++ } |
++#endif |
++ |
++ /* Pentium, cpuid command is available */ |
++ freebl_cpuid(0, &eax, &ebx, &ecx, &edx); |
++ cpuidLevel = eax; |
++ *(int *)string = ebx; |
++ *(int *)&string[4] = edx; |
++ *(int *)&string[8] = ecx; |
++ string[12] = 0; |
++ |
++ manufacturer = MAN_UNKNOWN; |
++ for (i=0; i < n_manufacturers; i++) { |
++ if ( strcmp(manMap[i],string) == 0) { |
++ manufacturer = i; |
++ } |
++ } |
++ |
++ if (manufacturer == INTEL) { |
++ cacheLineSize = getIntelCacheLineSize(cpuidLevel); |
++ } else { |
++ cacheLineSize = getOtherCacheLineSize(cpuidLevel); |
++ } |
++ /* doesn't support cache info based on cpuid. This means |
++ * an old pentium class processor, which has cache lines of |
++ * 32 bytes. If we learn differently, we can use a switch based on |
++ * the Manufacturer id */ |
++ if (cacheLineSize == 0) { |
++ cacheLineSize = 32; |
++ } |
++ return cacheLineSize; |
++} |
++#define MPI_GET_PROCESSOR_LINE_SIZE_DEFINED 1 |
++#endif |
++ |
++#if defined(__ppc64__) |
++/* |
++ * Sigh, The PPC has some really nice features to help us determine cache |
++ * size, since it had lots of direct control functions to do so. The POWER |
++ * processor even has an instruction to do this, but it was dropped in |
++ * PowerPC. Unfortunately most of them are not available in user mode. |
++ * |
++ * The dcbz function would be a great way to determine cache line size except |
++ * 1) it only works on write-back memory (it throws an exception otherwise), |
++ * and 2) because so many mac programs 'knew' the processor cache size was |
++ * 32 bytes, they used this instruction as a fast 'zero 32 bytes'. Now the new |
++ * G5 processor has 128 byte cache lines, but dcbz only clears 32 bytes to keep |
++ * these programs happy. dcbzl works if 64 bit instructions are supported. |
++ * If you know 64 bit instructions are supported, and that the stack is |
++ * write-back, you can use this code. |
++ */ |
++#include "memory.h" |
++ |
++/* clear the cache line that contains 'array' */ |
++static inline void dcbzl(char *array) |
++{ |
++ register char *a asm("r2") = array; |
++ __asm__ __volatile__( "dcbzl %0,r0" : "=r" (a): "0"(a) ); |
++} |
++ |
++ |
++#define PPC_DO_ALIGN(x,y) ((char *)\ |
++ ((((long long) (x))+((y)-1))&~((y)-1))) |
++ |
++#define PPC_MAX_LINE_SIZE 256 |
++unsigned long |
++s_mpi_getProcessorLineSize() |
++{ |
++ char testArray[2*PPC_MAX_LINE_SIZE+1]; |
++ char *test; |
++ int i; |
++ |
++ /* align the array on a maximum line size boundary, so we |
++ * know we are starting to clear from the first address */ |
++ test = PPC_DO_ALIGN(testArray, PPC_MAX_LINE_SIZE); |
++ /* set all the values to 1's */ |
++ memset(test, 0xff, PPC_MAX_LINE_SIZE); |
++ /* clear one cache block starting at 'test' */ |
++ dcbzl(test); |
++ |
++ /* find the size of the cleared area, that's our block size */ |
++ for (i=PPC_MAX_LINE_SIZE; i != 0; i = i/2) { |
++ if (test[i-1] == 0) { |
++ return i; |
++ } |
++ } |
++ return 0; |
++} |
++ |
++#define MPI_GET_PROCESSOR_LINE_SIZE_DEFINED 1 |
++#endif |
++ |
++ |
++/* |
++ * put other processor and platform specific cache code here |
++ * return the smallest cache line size in bytes on the processor |
++ * (usually the L1 cache). If the OS has a call, this would be |
++ * a great place to put it. |
++ * |
++ * If there is no cache, return 0; |
++ * |
++ * define MPI_GET_PROCESSOR_LINE_SIZE_DEFINED so the generic functions |
++ * below aren't compiled. |
++ * |
++ */ |
++ |
++ |
++/* target.mk can define MPI_CACHE_LINE_SIZE if it's common for the family or |
++ * OS */ |
++#if defined(MPI_CACHE_LINE_SIZE) && !defined(MPI_GET_PROCESSOR_LINE_SIZE_DEFINED) |
++ |
++unsigned long |
++s_mpi_getProcessorLineSize() |
++{ |
++ return MPI_CACHE_LINE_SIZE; |
++} |
++#define MPI_GET_PROCESSOR_LINE_SIZE_DEFINED 1 |
++#endif |
++ |
++ |
++/* If no way to get the processor cache line size has been defined, assume |
++ * it's 32 bytes (most common value, does not significantly impact performance) |
++ */ |
++#ifndef MPI_GET_PROCESSOR_LINE_SIZE_DEFINED |
++unsigned long |
++s_mpi_getProcessorLineSize() |
++{ |
++ return 32; |
++} |
++#endif |
++ |
++#ifdef TEST_IT |
++#include <stdio.h> |
++ |
++main() |
++{ |
++ printf("line size = %d\n", s_mpi_getProcessorLineSize()); |
++} |
++#endif |
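[Illustrative note, not part of tls-srp.patch] The header comment above ties s_mpi_getProcessorLineSize() to mp_modexp's defence against leaking private-key data through the data cache. As a rough sketch of that idea (touch_all_lines is a hypothetical helper; only s_mpi_getProcessorLineSize() comes from the patch), a table lookup whose index is secret can first pull every cache line of the whole table in, so the cache state afterwards is independent of which entry was wanted:

    #include <stddef.h>

    extern unsigned long s_mpi_getProcessorLineSize(void);

    /* Touch every cache line of 'table' (bytes long) so a later read of
     * any single entry leaves no index-dependent trace in the cache. */
    static void touch_all_lines(const unsigned char *table, size_t bytes)
    {
        unsigned long line = s_mpi_getProcessorLineSize();
        volatile unsigned char sink = 0;
        size_t i;

        if (line == 0)
            line = bytes ? bytes : 1;   /* no cache reported: one pass is enough */
        for (i = 0; i < bytes; i += line)
            sink ^= table[i];           /* bring the line holding table[i] in */
        (void)sink;
    }

Other mitigations (such as interleaving the table entries) pursue the same goal: making the memory access pattern independent of the secret index.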
+diff --git a/net/third_party/nss/ssl/mpi/mpi-config.h b/net/third_party/nss/ssl/mpi/mpi-config.h |
+new file mode 100644 |
+index 0000000..00a0acf |
+--- /dev/null |
++++ b/net/third_party/nss/ssl/mpi/mpi-config.h |
+@@ -0,0 +1,112 @@ |
++/* Default configuration for MPI library |
++ * |
++ * ***** BEGIN LICENSE BLOCK ***** |
++ * Version: MPL 1.1/GPL 2.0/LGPL 2.1 |
++ * |
++ * The contents of this file are subject to the Mozilla Public License Version |
++ * 1.1 (the "License"); you may not use this file except in compliance with |
++ * the License. You may obtain a copy of the License at |
++ * http://www.mozilla.org/MPL/ |
++ * |
++ * Software distributed under the License is distributed on an "AS IS" basis, |
++ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License |
++ * for the specific language governing rights and limitations under the |
++ * License. |
++ * |
++ * The Original Code is the MPI Arbitrary Precision Integer Arithmetic library. |
++ * |
++ * The Initial Developer of the Original Code is |
++ * Michael J. Fromberger. |
++ * Portions created by the Initial Developer are Copyright (C) 1997 |
++ * the Initial Developer. All Rights Reserved. |
++ * |
++ * Contributor(s): |
++ * Netscape Communications Corporation |
++ * |
++ * Alternatively, the contents of this file may be used under the terms of |
++ * either the GNU General Public License Version 2 or later (the "GPL"), or |
++ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), |
++ * in which case the provisions of the GPL or the LGPL are applicable instead |
++ * of those above. If you wish to allow use of your version of this file only |
++ * under the terms of either the GPL or the LGPL, and not to allow others to |
++ * use your version of this file under the terms of the MPL, indicate your |
++ * decision by deleting the provisions above and replace them with the notice |
++ * and other provisions required by the GPL or the LGPL. If you do not delete |
++ * the provisions above, a recipient may use your version of this file under |
++ * the terms of any one of the MPL, the GPL or the LGPL. |
++ * |
++ * ***** END LICENSE BLOCK ***** */ |
++/* $Id: mpi-config.h,v 1.5 2004/04/25 15:03:10 gerv%gerv.net Exp $ */ |
++ |
++#ifndef MPI_CONFIG_H_ |
++#define MPI_CONFIG_H_ |
++ |
++/* |
++ For boolean options, |
++ 0 = no |
++ 1 = yes |
++ |
++ Other options are documented individually. |
++ |
++ */ |
++ |
++#ifndef MP_IOFUNC |
++#define MP_IOFUNC 0 /* include mp_print() ? */ |
++#endif |
++ |
++#ifndef MP_MODARITH |
++#define MP_MODARITH 1 /* include modular arithmetic ? */ |
++#endif |
++ |
++#ifndef MP_NUMTH |
++#define MP_NUMTH 1 /* include number theoretic functions? */ |
++#endif |
++ |
++#ifndef MP_LOGTAB |
++#define MP_LOGTAB 1 /* use table of logs instead of log()? */ |
++#endif |
++ |
++#ifndef MP_MEMSET |
++#define MP_MEMSET 1 /* use memset() to zero buffers? */ |
++#endif |
++ |
++#ifndef MP_MEMCPY |
++#define MP_MEMCPY 1 /* use memcpy() to copy buffers? */ |
++#endif |
++ |
++#ifndef MP_CRYPTO |
++#define MP_CRYPTO 1 /* erase memory on free? */ |
++#endif |
++ |
++#ifndef MP_ARGCHK |
++/* |
++ 0 = no parameter checks |
++ 1 = runtime checks, continue execution and return an error to caller |
++ 2 = assertions; dump core on parameter errors |
++ */ |
++#ifdef DEBUG |
++#define MP_ARGCHK 2 /* how to check input arguments */ |
++#else |
++#define MP_ARGCHK 1 /* how to check input arguments */ |
++#endif |
++#endif |
++ |
++#ifndef MP_DEBUG |
++#define MP_DEBUG 0 /* print diagnostic output? */ |
++#endif |
++ |
++#ifndef MP_DEFPREC |
++#define MP_DEFPREC 64 /* default precision, in digits */ |
++#endif |
++ |
++#ifndef MP_MACRO |
++#define MP_MACRO 0 /* use macros for frequent calls? */ |
++#endif |
++ |
++#ifndef MP_SQUARE |
++#define MP_SQUARE 1 /* use separate squaring code? */ |
++#endif |
++ |
++#endif /* ifndef MPI_CONFIG_H_ */ |
++ |
++ |
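[Illustrative note, not part of tls-srp.patch] The MP_ARGCHK setting above selects how the ARGCHK() macro used throughout mpi.c (for example ARGCHK(mp != NULL, MP_BADARG)) reacts to bad arguments. ARGCHK itself is defined in mpi.h, which is not shown in this hunk; a definition consistent with the three documented levels would look roughly like this sketch:

    #if MP_ARGCHK == 1
    #define ARGCHK(X, Y)  { if (!(X)) { return (Y); } }  /* return an error code */
    #elif MP_ARGCHK == 2
    #include <assert.h>
    #define ARGCHK(X, Y)  assert(X)                      /* dump core in DEBUG builds */
    #else
    #define ARGCHK(X, Y)  /* no argument checking */
    #endif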
+diff --git a/net/third_party/nss/ssl/mpi/mpi-priv.h b/net/third_party/nss/ssl/mpi/mpi-priv.h |
+new file mode 100644 |
+index 0000000..8efaf3c |
+--- /dev/null |
++++ b/net/third_party/nss/ssl/mpi/mpi-priv.h |
+@@ -0,0 +1,320 @@ |
++/* |
++ * mpi-priv.h - Private header file for MPI |
++ * Arbitrary precision integer arithmetic library |
++ * |
++ * NOTE WELL: the content of this header file is NOT part of the "public" |
++ * API for the MPI library, and may change at any time. |
++ * Application programs that use libmpi should NOT include this header file. |
++ * |
++ * ***** BEGIN LICENSE BLOCK ***** |
++ * Version: MPL 1.1/GPL 2.0/LGPL 2.1 |
++ * |
++ * The contents of this file are subject to the Mozilla Public License Version |
++ * 1.1 (the "License"); you may not use this file except in compliance with |
++ * the License. You may obtain a copy of the License at |
++ * http://www.mozilla.org/MPL/ |
++ * |
++ * Software distributed under the License is distributed on an "AS IS" basis, |
++ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License |
++ * for the specific language governing rights and limitations under the |
++ * License. |
++ * |
++ * The Original Code is the MPI Arbitrary Precision Integer Arithmetic library. |
++ * |
++ * The Initial Developer of the Original Code is |
++ * Michael J. Fromberger. |
++ * Portions created by the Initial Developer are Copyright (C) 1998 |
++ * the Initial Developer. All Rights Reserved. |
++ * |
++ * Contributor(s): |
++ * Netscape Communications Corporation |
++ * |
++ * Alternatively, the contents of this file may be used under the terms of |
++ * either the GNU General Public License Version 2 or later (the "GPL"), or |
++ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), |
++ * in which case the provisions of the GPL or the LGPL are applicable instead |
++ * of those above. If you wish to allow use of your version of this file only |
++ * under the terms of either the GPL or the LGPL, and not to allow others to |
++ * use your version of this file under the terms of the MPL, indicate your |
++ * decision by deleting the provisions above and replace them with the notice |
++ * and other provisions required by the GPL or the LGPL. If you do not delete |
++ * the provisions above, a recipient may use your version of this file under |
++ * the terms of any one of the MPL, the GPL or the LGPL. |
++ * |
++ * ***** END LICENSE BLOCK ***** */ |
++/* $Id: mpi-priv.h,v 1.23 2010/05/02 22:36:41 nelson%bolyard.com Exp $ */ |
++#ifndef _MPI_PRIV_H_ |
++#define _MPI_PRIV_H_ 1 |
++ |
++#include "mpi.h" |
++#include <stdlib.h> |
++#include <string.h> |
++#include <ctype.h> |
++ |
++#if MP_DEBUG |
++#include <stdio.h> |
++ |
++#define DIAG(T,V) {fprintf(stderr,T);mp_print(V,stderr);fputc('\n',stderr);} |
++#else |
++#define DIAG(T,V) |
++#endif |
++ |
++/* If we aren't using a wired-in logarithm table, we need to include |
++ the math library to get the log() function |
++ */ |
++ |
++/* {{{ s_logv_2[] - log table for 2 in various bases */ |
++ |
++#if MP_LOGTAB |
++/* |
++ A table of the logs of 2 for various bases (the 0 and 1 entries of |
++ this table are meaningless and should not be referenced). |
++ |
++ This table is used to compute output lengths for the mp_toradix() |
++ function. Since a number n in radix r takes up about log_r(n) |
++ digits, we estimate the output size by taking the least integer |
++ greater than log_r(n), where: |
++ |
++ log_r(n) = log_2(n) * log_r(2) |
++ |
++ This table, therefore, is a table of log_r(2) for 2 <= r <= 36, |
++ which are the output bases supported. |
++ */ |
++ |
++extern const float s_logv_2[]; |
++#define LOG_V_2(R) s_logv_2[(R)] |
++ |
++#else |
++ |
++/* |
++ If MP_LOGTAB is not defined, use the math library to compute the |
++ logarithms on the fly. Otherwise, use the table. |
++ Pick which works best for your system. |
++ */ |
++ |
++#include <math.h> |
++#define LOG_V_2(R) (log(2.0)/log(R)) |
++ |
++#endif /* if MP_LOGTAB */ |
++ |
++/* }}} */ |
++ |
++/* {{{ Digit arithmetic macros */ |
++ |
++/* |
++ When adding and multiplying digits, the results can be larger than |
++ can be contained in an mp_digit. Thus, an mp_word is used. These |
++ macros mask off the upper and lower digits of the mp_word (the |
++ mp_word may be more than 2 mp_digits wide, but we only concern |
++ ourselves with the low-order 2 mp_digits) |
++ */ |
++ |
++#define CARRYOUT(W) (mp_digit)((W)>>DIGIT_BIT) |
++#define ACCUM(W) (mp_digit)(W) |
++ |
++#define MP_MIN(a,b) (((a) < (b)) ? (a) : (b)) |
++#define MP_MAX(a,b) (((a) > (b)) ? (a) : (b)) |
++#define MP_HOWMANY(a,b) (((a) + (b) - 1)/(b)) |
++#define MP_ROUNDUP(a,b) (MP_HOWMANY(a,b) * (b)) |
++ |
++/* }}} */ |
++ |
++/* {{{ Comparison constants */ |
++ |
++#define MP_LT -1 |
++#define MP_EQ 0 |
++#define MP_GT 1 |
++ |
++/* }}} */ |
++ |
++/* {{{ private function declarations */ |
++ |
++/* |
++ If MP_MACRO is false, these will be defined as actual functions; |
++ otherwise, suitable macro definitions will be used. This works |
++ around the fact that ANSI C89 doesn't support an 'inline' keyword |
++ (although I hear C9x will ... about bloody time). At present, the |
++ macro definitions are identical to the function bodies, but they'll |
++ expand in place, instead of generating a function call. |
++ |
++ I chose these particular functions to be made into macros because |
++ some profiling showed they are called a lot on a typical workload, |
++ and yet they are primarily housekeeping. |
++ */ |
++#if MP_MACRO == 0 |
++ void s_mp_setz(mp_digit *dp, mp_size count); /* zero digits */ |
++ void s_mp_copy(const mp_digit *sp, mp_digit *dp, mp_size count); /* copy */ |
++ void *s_mp_alloc(size_t nb, size_t ni); /* general allocator */ |
++ void s_mp_free(void *ptr); /* general free function */ |
++extern unsigned long mp_allocs; |
++extern unsigned long mp_frees; |
++extern unsigned long mp_copies; |
++#else |
++ |
++ /* Even if these are defined as macros, we need to respect the settings |
++ of the MP_MEMSET and MP_MEMCPY configuration options... |
++ */ |
++ #if MP_MEMSET == 0 |
++ #define s_mp_setz(dp, count) \ |
++ {int ix;for(ix=0;ix<(count);ix++)(dp)[ix]=0;} |
++ #else |
++ #define s_mp_setz(dp, count) memset(dp, 0, (count) * sizeof(mp_digit)) |
++ #endif /* MP_MEMSET */ |
++ |
++ #if MP_MEMCPY == 0 |
++ #define s_mp_copy(sp, dp, count) \ |
++ {int ix;for(ix=0;ix<(count);ix++)(dp)[ix]=(sp)[ix];} |
++ #else |
++ #define s_mp_copy(sp, dp, count) memcpy(dp, sp, (count) * sizeof(mp_digit)) |
++ #endif /* MP_MEMCPY */ |
++ |
++ #define s_mp_alloc(nb, ni) calloc(nb, ni) |
++ #define s_mp_free(ptr) {if(ptr) free(ptr);} |
++#endif /* MP_MACRO */ |
++ |
++mp_err s_mp_grow(mp_int *mp, mp_size min); /* increase allocated size */ |
++mp_err s_mp_pad(mp_int *mp, mp_size min); /* left pad with zeroes */ |
++ |
++#if MP_MACRO == 0 |
++ void s_mp_clamp(mp_int *mp); /* clip leading zeroes */ |
++#else |
++ #define s_mp_clamp(mp)\ |
++ { mp_size used = MP_USED(mp); \ |
++ while (used > 1 && DIGIT(mp, used - 1) == 0) --used; \ |
++ MP_USED(mp) = used; \ |
++ } |
++#endif /* MP_MACRO */ |
++ |
++void s_mp_exch(mp_int *a, mp_int *b); /* swap a and b in place */ |
++ |
++mp_err s_mp_lshd(mp_int *mp, mp_size p); /* left-shift by p digits */ |
++void s_mp_rshd(mp_int *mp, mp_size p); /* right-shift by p digits */ |
++mp_err s_mp_mul_2d(mp_int *mp, mp_digit d); /* multiply by 2^d in place */ |
++void s_mp_div_2d(mp_int *mp, mp_digit d); /* divide by 2^d in place */ |
++void s_mp_mod_2d(mp_int *mp, mp_digit d); /* modulo 2^d in place */ |
++void s_mp_div_2(mp_int *mp); /* divide by 2 in place */ |
++mp_err s_mp_mul_2(mp_int *mp); /* multiply by 2 in place */ |
++mp_err s_mp_norm(mp_int *a, mp_int *b, mp_digit *pd); |
++ /* normalize for division */ |
++mp_err s_mp_add_d(mp_int *mp, mp_digit d); /* unsigned digit addition */ |
++mp_err s_mp_sub_d(mp_int *mp, mp_digit d); /* unsigned digit subtract */ |
++mp_err s_mp_mul_d(mp_int *mp, mp_digit d); /* unsigned digit multiply */ |
++mp_err s_mp_div_d(mp_int *mp, mp_digit d, mp_digit *r); |
++ /* unsigned digit divide */ |
++mp_err s_mp_reduce(mp_int *x, const mp_int *m, const mp_int *mu); |
++ /* Barrett reduction */ |
++mp_err s_mp_add(mp_int *a, const mp_int *b); /* magnitude addition */ |
++mp_err s_mp_add_3arg(const mp_int *a, const mp_int *b, mp_int *c); |
++mp_err s_mp_sub(mp_int *a, const mp_int *b); /* magnitude subtract */ |
++mp_err s_mp_sub_3arg(const mp_int *a, const mp_int *b, mp_int *c); |
++mp_err s_mp_add_offset(mp_int *a, mp_int *b, mp_size offset); |
++ /* a += b * RADIX^offset */ |
++mp_err s_mp_mul(mp_int *a, const mp_int *b); /* magnitude multiply */ |
++#if MP_SQUARE |
++mp_err s_mp_sqr(mp_int *a); /* magnitude square */ |
++#else |
++#define s_mp_sqr(a) s_mp_mul(a, a) |
++#endif |
++mp_err s_mp_div(mp_int *rem, mp_int *div, mp_int *quot); /* magnitude div */ |
++mp_err s_mp_exptmod(const mp_int *a, const mp_int *b, const mp_int *m, mp_int *c); |
++mp_err s_mp_2expt(mp_int *a, mp_digit k); /* a = 2^k */ |
++int s_mp_cmp(const mp_int *a, const mp_int *b); /* magnitude comparison */ |
++int s_mp_cmp_d(const mp_int *a, mp_digit d); /* magnitude digit compare */ |
++int s_mp_ispow2(const mp_int *v); /* is v a power of 2? */ |
++int s_mp_ispow2d(mp_digit d); /* is d a power of 2? */ |
++ |
++int s_mp_tovalue(char ch, int r); /* convert ch to value */ |
++char s_mp_todigit(mp_digit val, int r, int low); /* convert val to digit */ |
++int s_mp_outlen(int bits, int r); /* output length in bytes */ |
++mp_digit s_mp_invmod_radix(mp_digit P); /* returns (P ** -1) mod RADIX */ |
++mp_err s_mp_invmod_odd_m( const mp_int *a, const mp_int *m, mp_int *c); |
++mp_err s_mp_invmod_2d( const mp_int *a, mp_size k, mp_int *c); |
++mp_err s_mp_invmod_even_m(const mp_int *a, const mp_int *m, mp_int *c); |
++ |
++#ifdef NSS_USE_COMBA |
++ |
++#define IS_POWER_OF_2(a) ((a) && !((a) & ((a)-1))) |
++ |
++void s_mp_mul_comba_4(const mp_int *A, const mp_int *B, mp_int *C); |
++void s_mp_mul_comba_8(const mp_int *A, const mp_int *B, mp_int *C); |
++void s_mp_mul_comba_16(const mp_int *A, const mp_int *B, mp_int *C); |
++void s_mp_mul_comba_32(const mp_int *A, const mp_int *B, mp_int *C); |
++ |
++void s_mp_sqr_comba_4(const mp_int *A, mp_int *B); |
++void s_mp_sqr_comba_8(const mp_int *A, mp_int *B); |
++void s_mp_sqr_comba_16(const mp_int *A, mp_int *B); |
++void s_mp_sqr_comba_32(const mp_int *A, mp_int *B); |
++ |
++#endif /* end NSS_USE_COMBA */ |
++ |
++/* ------ mpv functions, operate on arrays of digits, not on mp_int's ------ */ |
++#if defined (__OS2__) && defined (__IBMC__) |
++#define MPI_ASM_DECL __cdecl |
++#else |
++#define MPI_ASM_DECL |
++#endif |
++ |
++#ifdef MPI_AMD64 |
++ |
++mp_digit MPI_ASM_DECL s_mpv_mul_set_vec64(mp_digit*, mp_digit *, mp_size, mp_digit); |
++mp_digit MPI_ASM_DECL s_mpv_mul_add_vec64(mp_digit*, const mp_digit*, mp_size, mp_digit); |
++ |
++/* c = a * b */ |
++#define s_mpv_mul_d(a, a_len, b, c) \ |
++ ((mp_digit *)c)[a_len] = s_mpv_mul_set_vec64(c, a, a_len, b) |
++ |
++/* c += a * b */ |
++#define s_mpv_mul_d_add(a, a_len, b, c) \ |
++ ((mp_digit *)c)[a_len] = s_mpv_mul_add_vec64(c, a, a_len, b) |
++ |
++ |
++#else |
++ |
++void MPI_ASM_DECL s_mpv_mul_d(const mp_digit *a, mp_size a_len, |
++ mp_digit b, mp_digit *c); |
++void MPI_ASM_DECL s_mpv_mul_d_add(const mp_digit *a, mp_size a_len, |
++ mp_digit b, mp_digit *c); |
++ |
++#endif |
++ |
++void MPI_ASM_DECL s_mpv_mul_d_add_prop(const mp_digit *a, |
++ mp_size a_len, mp_digit b, |
++ mp_digit *c); |
++void MPI_ASM_DECL s_mpv_sqr_add_prop(const mp_digit *a, |
++ mp_size a_len, |
++ mp_digit *sqrs); |
++ |
++mp_err MPI_ASM_DECL s_mpv_div_2dx1d(mp_digit Nhi, mp_digit Nlo, |
++ mp_digit divisor, mp_digit *quot, mp_digit *rem); |
++ |
++/* c += a * b * (MP_RADIX ** offset); */ |
++#define s_mp_mul_d_add_offset(a, b, c, off) \ |
++(s_mpv_mul_d_add_prop(MP_DIGITS(a), MP_USED(a), b, MP_DIGITS(c) + off), MP_OKAY) |
++ |
++typedef struct { |
++ mp_int N; /* modulus N */ |
++ mp_digit n0prime; /* n0' = - (n0 ** -1) mod MP_RADIX */ |
++ mp_size b; /* R == 2 ** b, also b = # significant bits in N */ |
++} mp_mont_modulus; |
++ |
++mp_err s_mp_mul_mont(const mp_int *a, const mp_int *b, mp_int *c, |
++ mp_mont_modulus *mmm); |
++mp_err s_mp_redc(mp_int *T, mp_mont_modulus *mmm); |
++ |
++/* |
++ * s_mpi_getProcessorLineSize() returns the size in bytes of the cache line |
++ * if a cache exists, or zero if there is no cache. If more than one |
++ * cache line exists, it should return the smallest line size (which is |
++ * usually the L1 cache). |
++ * |
++ * mp_modexp uses this information to make sure that private key information |
++ * isn't being leaked through the cache. |
++ * |
++ * see mpcpucache.c for the implementation. |
++ */ |
++unsigned long s_mpi_getProcessorLineSize(); |
++ |
++/* }}} */ |
++#endif |
++ |
++ |
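[Illustrative note, not part of tls-srp.patch] The LOG_V_2() table declared above exists to size radix-conversion output: a value with b significant bits needs about b * log_r(2) digits in radix r. A sketch of that estimate (the library's own s_mp_outlen() may round and pad differently; estimated_outlen is a hypothetical name):

    #include "mpi-priv.h"   /* LOG_V_2(); pulls in <math.h> when MP_LOGTAB is 0 */

    static int estimated_outlen(int bits, int radix)
    {
        /* about bits * log_radix(2) digits; round up and leave room
         * for a sign character and a terminating NUL */
        return (int)((double)bits * LOG_V_2(radix) + 1.0) + 2;
    }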
+diff --git a/net/third_party/nss/ssl/mpi/mpi.c b/net/third_party/nss/ssl/mpi/mpi.c |
+new file mode 100644 |
+index 0000000..8cd6ca6 |
+--- /dev/null |
++++ b/net/third_party/nss/ssl/mpi/mpi.c |
+@@ -0,0 +1,4852 @@ |
++/* |
++ * mpi.c |
++ * |
++ * Arbitrary precision integer arithmetic library |
++ * |
++ * ***** BEGIN LICENSE BLOCK ***** |
++ * Version: MPL 1.1/GPL 2.0/LGPL 2.1 |
++ * |
++ * The contents of this file are subject to the Mozilla Public License Version |
++ * 1.1 (the "License"); you may not use this file except in compliance with |
++ * the License. You may obtain a copy of the License at |
++ * http://www.mozilla.org/MPL/ |
++ * |
++ * Software distributed under the License is distributed on an "AS IS" basis, |
++ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License |
++ * for the specific language governing rights and limitations under the |
++ * License. |
++ * |
++ * The Original Code is the MPI Arbitrary Precision Integer Arithmetic library. |
++ * |
++ * The Initial Developer of the Original Code is |
++ * Michael J. Fromberger. |
++ * Portions created by the Initial Developer are Copyright (C) 1998 |
++ * the Initial Developer. All Rights Reserved. |
++ * |
++ * Contributor(s): |
++ * Netscape Communications Corporation |
++ * Douglas Stebila <douglas@stebila.ca> of Sun Laboratories. |
++ * |
++ * Alternatively, the contents of this file may be used under the terms of |
++ * either the GNU General Public License Version 2 or later (the "GPL"), or |
++ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), |
++ * in which case the provisions of the GPL or the LGPL are applicable instead |
++ * of those above. If you wish to allow use of your version of this file only |
++ * under the terms of either the GPL or the LGPL, and not to allow others to |
++ * use your version of this file under the terms of the MPL, indicate your |
++ * decision by deleting the provisions above and replace them with the notice |
++ * and other provisions required by the GPL or the LGPL. If you do not delete |
++ * the provisions above, a recipient may use your version of this file under |
++ * the terms of any one of the MPL, the GPL or the LGPL. |
++ * |
++ * ***** END LICENSE BLOCK ***** */ |
++/* $Id: mpi.c,v 1.47 2010/05/02 22:36:41 nelson%bolyard.com Exp $ */ |
++ |
++#define MP_API_COMPATIBLE 1 |
++#include "mpi-priv.h" |
++#if defined(OSF1) |
++#include <c_asm.h> |
++#endif |
++ |
++#if MP_LOGTAB |
++/* |
++ A table of the logs of 2 for various bases (the 0 and 1 entries of |
++ this table are meaningless and should not be referenced). |
++ |
++ This table is used to compute output lengths for the mp_toradix() |
++ function. Since a number n in radix r takes up about log_r(n) |
++ digits, we estimate the output size by taking the least integer |
++ greater than log_r(n), where: |
++ |
++ log_r(n) = log_2(n) * log_r(2) |
++ |
++ This table, therefore, is a table of log_r(2) for 2 <= r <= 36, |
++ which are the output bases supported. |
++ */ |
++#include "logtab.h" |
++#endif |
++ |
++/* {{{ Constant strings */ |
++ |
++/* Constant strings returned by mp_strerror() */ |
++static const char *mp_err_string[] = { |
++ "unknown result code", /* say what? */ |
++ "boolean true", /* MP_OKAY, MP_YES */ |
++ "boolean false", /* MP_NO */ |
++ "out of memory", /* MP_MEM */ |
++ "argument out of range", /* MP_RANGE */ |
++ "invalid input parameter", /* MP_BADARG */ |
++ "result is undefined" /* MP_UNDEF */ |
++}; |
++ |
++/* Value to digit maps for radix conversion */ |
++ |
++/* s_dmap_1 - standard digits and letters */ |
++static const char *s_dmap_1 = |
++ "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+/"; |
++ |
++/* }}} */ |
++ |
++unsigned long mp_allocs; |
++unsigned long mp_frees; |
++unsigned long mp_copies; |
++ |
++/* {{{ Default precision manipulation */ |
++ |
++/* Default precision for newly created mp_int's */ |
++static mp_size s_mp_defprec = MP_DEFPREC; |
++ |
++mp_size mp_get_prec(void) |
++{ |
++ return s_mp_defprec; |
++ |
++} /* end mp_get_prec() */ |
++ |
++void mp_set_prec(mp_size prec) |
++{ |
++ if(prec == 0) |
++ s_mp_defprec = MP_DEFPREC; |
++ else |
++ s_mp_defprec = prec; |
++ |
++} /* end mp_set_prec() */ |
++ |
++/* }}} */ |
++ |
++/*------------------------------------------------------------------------*/ |
++/* {{{ mp_init(mp) */ |
++ |
++/* |
++ mp_init(mp) |
++ |
++ Initialize a new zero-valued mp_int. Returns MP_OKAY if successful, |
++ MP_MEM if memory could not be allocated for the structure. |
++ */ |
++ |
++mp_err mp_init(mp_int *mp) |
++{ |
++ return mp_init_size(mp, s_mp_defprec); |
++ |
++} /* end mp_init() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_init_size(mp, prec) */ |
++ |
++/* |
++ mp_init_size(mp, prec) |
++ |
++ Initialize a new zero-valued mp_int with at least the given |
++ precision; returns MP_OKAY if successful, or MP_MEM if memory could |
++ not be allocated for the structure. |
++ */ |
++ |
++mp_err mp_init_size(mp_int *mp, mp_size prec) |
++{ |
++ ARGCHK(mp != NULL && prec > 0, MP_BADARG); |
++ |
++ prec = MP_ROUNDUP(prec, s_mp_defprec); |
++ if((DIGITS(mp) = s_mp_alloc(prec, sizeof(mp_digit))) == NULL) |
++ return MP_MEM; |
++ |
++ SIGN(mp) = ZPOS; |
++ USED(mp) = 1; |
++ ALLOC(mp) = prec; |
++ |
++ return MP_OKAY; |
++ |
++} /* end mp_init_size() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_init_copy(mp, from) */ |
++ |
++/* |
++ mp_init_copy(mp, from) |
++ |
++ Initialize mp as an exact copy of from. Returns MP_OKAY if |
++ successful, MP_MEM if memory could not be allocated for the new |
++ structure. |
++ */ |
++ |
++mp_err mp_init_copy(mp_int *mp, const mp_int *from) |
++{ |
++ ARGCHK(mp != NULL && from != NULL, MP_BADARG); |
++ |
++ if(mp == from) |
++ return MP_OKAY; |
++ |
++ if((DIGITS(mp) = s_mp_alloc(ALLOC(from), sizeof(mp_digit))) == NULL) |
++ return MP_MEM; |
++ |
++ s_mp_copy(DIGITS(from), DIGITS(mp), USED(from)); |
++ USED(mp) = USED(from); |
++ ALLOC(mp) = ALLOC(from); |
++ SIGN(mp) = SIGN(from); |
++ |
++ return MP_OKAY; |
++ |
++} /* end mp_init_copy() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_copy(from, to) */ |
++ |
++/* |
++ mp_copy(from, to) |
++ |
++ Copies the mp_int 'from' to the mp_int 'to'. It is presumed that |
++ 'to' has already been initialized (if not, use mp_init_copy() |
++ instead). If 'from' and 'to' are identical, nothing happens. |
++ */ |
++ |
++mp_err mp_copy(const mp_int *from, mp_int *to) |
++{ |
++ ARGCHK(from != NULL && to != NULL, MP_BADARG); |
++ |
++ if(from == to) |
++ return MP_OKAY; |
++ |
++ ++mp_copies; |
++ { /* copy */ |
++ mp_digit *tmp; |
++ |
++ /* |
++ If the allocated buffer in 'to' already has enough space to hold |
++ all the used digits of 'from', we'll re-use it to avoid hitting |
++ the memory allocator more than necessary; otherwise, we'd have |
++ to grow anyway, so we just allocate a hunk and make the copy as |
++ usual |
++ */ |
++ if(ALLOC(to) >= USED(from)) { |
++ s_mp_setz(DIGITS(to) + USED(from), ALLOC(to) - USED(from)); |
++ s_mp_copy(DIGITS(from), DIGITS(to), USED(from)); |
++ |
++ } else { |
++ if((tmp = s_mp_alloc(ALLOC(from), sizeof(mp_digit))) == NULL) |
++ return MP_MEM; |
++ |
++ s_mp_copy(DIGITS(from), tmp, USED(from)); |
++ |
++ if(DIGITS(to) != NULL) { |
++#if MP_CRYPTO |
++ s_mp_setz(DIGITS(to), ALLOC(to)); |
++#endif |
++ s_mp_free(DIGITS(to)); |
++ } |
++ |
++ DIGITS(to) = tmp; |
++ ALLOC(to) = ALLOC(from); |
++ } |
++ |
++ /* Copy the precision and sign from the original */ |
++ USED(to) = USED(from); |
++ SIGN(to) = SIGN(from); |
++ } /* end copy */ |
++ |
++ return MP_OKAY; |
++ |
++} /* end mp_copy() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_exch(mp1, mp2) */ |
++ |
++/* |
++ mp_exch(mp1, mp2) |
++ |
++ Exchange mp1 and mp2 without allocating any intermediate memory |
++ (well, unless you count the stack space needed for this call and the |
++ locals it creates...). This cannot fail. |
++ */ |
++ |
++void mp_exch(mp_int *mp1, mp_int *mp2) |
++{ |
++#if MP_ARGCHK == 2 |
++ assert(mp1 != NULL && mp2 != NULL); |
++#else |
++ if(mp1 == NULL || mp2 == NULL) |
++ return; |
++#endif |
++ |
++ s_mp_exch(mp1, mp2); |
++ |
++} /* end mp_exch() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_clear(mp) */ |
++ |
++/* |
++ mp_clear(mp) |
++ |
++ Release the storage used by an mp_int, and void its fields so that |
++ if someone calls mp_clear() again for the same int later, we won't |
++ get tollchocked. |
++ */ |
++ |
++void mp_clear(mp_int *mp) |
++{ |
++ if(mp == NULL) |
++ return; |
++ |
++ if(DIGITS(mp) != NULL) { |
++#if MP_CRYPTO |
++ s_mp_setz(DIGITS(mp), ALLOC(mp)); |
++#endif |
++ s_mp_free(DIGITS(mp)); |
++ DIGITS(mp) = NULL; |
++ } |
++ |
++ USED(mp) = 0; |
++ ALLOC(mp) = 0; |
++ |
++} /* end mp_clear() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_zero(mp) */ |
++ |
++/* |
++ mp_zero(mp) |
++ |
++ Set mp to zero. Does not change the allocated size of the structure, |
++ and therefore cannot fail (except on a bad argument, which we ignore) |
++ */ |
++void mp_zero(mp_int *mp) |
++{ |
++ if(mp == NULL) |
++ return; |
++ |
++ s_mp_setz(DIGITS(mp), ALLOC(mp)); |
++ USED(mp) = 1; |
++ SIGN(mp) = ZPOS; |
++ |
++} /* end mp_zero() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_set(mp, d) */ |
++ |
++void mp_set(mp_int *mp, mp_digit d) |
++{ |
++ if(mp == NULL) |
++ return; |
++ |
++ mp_zero(mp); |
++ DIGIT(mp, 0) = d; |
++ |
++} /* end mp_set() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_set_int(mp, z) */ |
++ |
++mp_err mp_set_int(mp_int *mp, long z) |
++{ |
++ int ix; |
++ unsigned long v = labs(z); |
++ mp_err res; |
++ |
++ ARGCHK(mp != NULL, MP_BADARG); |
++ |
++ mp_zero(mp); |
++ if(z == 0) |
++ return MP_OKAY; /* shortcut for zero */ |
++ |
++ if (sizeof v <= sizeof(mp_digit)) { |
++ DIGIT(mp,0) = v; |
++ } else { |
++ for (ix = sizeof(long) - 1; ix >= 0; ix--) { |
++ if ((res = s_mp_mul_d(mp, (UCHAR_MAX + 1))) != MP_OKAY) |
++ return res; |
++ |
++ res = s_mp_add_d(mp, (mp_digit)((v >> (ix * CHAR_BIT)) & UCHAR_MAX)); |
++ if (res != MP_OKAY) |
++ return res; |
++ } |
++ } |
++ if(z < 0) |
++ SIGN(mp) = NEG; |
++ |
++ return MP_OKAY; |
++ |
++} /* end mp_set_int() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_set_ulong(mp, z) */ |
++ |
++mp_err mp_set_ulong(mp_int *mp, unsigned long z) |
++{ |
++ int ix; |
++ mp_err res; |
++ |
++ ARGCHK(mp != NULL, MP_BADARG); |
++ |
++ mp_zero(mp); |
++ if(z == 0) |
++ return MP_OKAY; /* shortcut for zero */ |
++ |
++ if (sizeof z <= sizeof(mp_digit)) { |
++ DIGIT(mp,0) = z; |
++ } else { |
++ for (ix = sizeof(long) - 1; ix >= 0; ix--) { |
++ if ((res = s_mp_mul_d(mp, (UCHAR_MAX + 1))) != MP_OKAY) |
++ return res; |
++ |
++ res = s_mp_add_d(mp, (mp_digit)((z >> (ix * CHAR_BIT)) & UCHAR_MAX)); |
++ if (res != MP_OKAY) |
++ return res; |
++ } |
++ } |
++ return MP_OKAY; |
++} /* end mp_set_ulong() */ |
++ |
++/* }}} */ |
++ |
++/*------------------------------------------------------------------------*/ |
++/* {{{ Digit arithmetic */ |
++ |
++/* {{{ mp_add_d(a, d, b) */ |
++ |
++/* |
++ mp_add_d(a, d, b) |
++ |
++ Compute the sum b = a + d, for a single digit d. Respects the sign of |
++ its primary addend (single digits are unsigned anyway). |
++ */ |
++ |
++mp_err mp_add_d(const mp_int *a, mp_digit d, mp_int *b) |
++{ |
++ mp_int tmp; |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL && b != NULL, MP_BADARG); |
++ |
++ if((res = mp_init_copy(&tmp, a)) != MP_OKAY) |
++ return res; |
++ |
++ if(SIGN(&tmp) == ZPOS) { |
++ if((res = s_mp_add_d(&tmp, d)) != MP_OKAY) |
++ goto CLEANUP; |
++ } else if(s_mp_cmp_d(&tmp, d) >= 0) { |
++ if((res = s_mp_sub_d(&tmp, d)) != MP_OKAY) |
++ goto CLEANUP; |
++ } else { |
++ mp_neg(&tmp, &tmp); |
++ |
++ DIGIT(&tmp, 0) = d - DIGIT(&tmp, 0); |
++ } |
++ |
++ if(s_mp_cmp_d(&tmp, 0) == 0) |
++ SIGN(&tmp) = ZPOS; |
++ |
++ s_mp_exch(&tmp, b); |
++ |
++CLEANUP: |
++ mp_clear(&tmp); |
++ return res; |
++ |
++} /* end mp_add_d() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_sub_d(a, d, b) */ |
++ |
++/* |
++ mp_sub_d(a, d, b) |
++ |
++ Compute the difference b = a - d, for a single digit d. Respects the |
++  sign of its minuend (single digits are unsigned anyway).
++ */ |
++ |
++mp_err mp_sub_d(const mp_int *a, mp_digit d, mp_int *b) |
++{ |
++ mp_int tmp; |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL && b != NULL, MP_BADARG); |
++ |
++ if((res = mp_init_copy(&tmp, a)) != MP_OKAY) |
++ return res; |
++ |
++ if(SIGN(&tmp) == NEG) { |
++ if((res = s_mp_add_d(&tmp, d)) != MP_OKAY) |
++ goto CLEANUP; |
++ } else if(s_mp_cmp_d(&tmp, d) >= 0) { |
++ if((res = s_mp_sub_d(&tmp, d)) != MP_OKAY) |
++ goto CLEANUP; |
++ } else { |
++ mp_neg(&tmp, &tmp); |
++ |
++ DIGIT(&tmp, 0) = d - DIGIT(&tmp, 0); |
++ SIGN(&tmp) = NEG; |
++ } |
++ |
++ if(s_mp_cmp_d(&tmp, 0) == 0) |
++ SIGN(&tmp) = ZPOS; |
++ |
++ s_mp_exch(&tmp, b); |
++ |
++CLEANUP: |
++ mp_clear(&tmp); |
++ return res; |
++ |
++} /* end mp_sub_d() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_mul_d(a, d, b) */ |
++ |
++/* |
++ mp_mul_d(a, d, b) |
++ |
++ Compute the product b = a * d, for a single digit d. Respects the sign |
++ of its multiplicand (single digits are unsigned anyway) |
++ */ |
++ |
++mp_err mp_mul_d(const mp_int *a, mp_digit d, mp_int *b) |
++{ |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL && b != NULL, MP_BADARG); |
++ |
++ if(d == 0) { |
++ mp_zero(b); |
++ return MP_OKAY; |
++ } |
++ |
++ if((res = mp_copy(a, b)) != MP_OKAY) |
++ return res; |
++ |
++ res = s_mp_mul_d(b, d); |
++ |
++ return res; |
++ |
++} /* end mp_mul_d() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_mul_2(a, c) */ |
++ |
++mp_err mp_mul_2(const mp_int *a, mp_int *c) |
++{ |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL && c != NULL, MP_BADARG); |
++ |
++ if((res = mp_copy(a, c)) != MP_OKAY) |
++ return res; |
++ |
++ return s_mp_mul_2(c); |
++ |
++} /* end mp_mul_2() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_div_d(a, d, q, r) */ |
++ |
++/* |
++ mp_div_d(a, d, q, r) |
++ |
++ Compute the quotient q = a / d and remainder r = a mod d, for a |
++  single digit d.  Respects the sign of its dividend (single digits are
++ unsigned anyway). |
++ */ |
++ |
++mp_err mp_div_d(const mp_int *a, mp_digit d, mp_int *q, mp_digit *r) |
++{ |
++ mp_err res; |
++ mp_int qp; |
++ mp_digit rem; |
++ int pow; |
++ |
++ ARGCHK(a != NULL, MP_BADARG); |
++ |
++ if(d == 0) |
++ return MP_RANGE; |
++ |
++ /* Shortcut for powers of two ... */ |
++ if((pow = s_mp_ispow2d(d)) >= 0) { |
++ mp_digit mask; |
++ |
++ mask = ((mp_digit)1 << pow) - 1; |
++ rem = DIGIT(a, 0) & mask; |
++ |
++ if(q) { |
++ mp_copy(a, q); |
++ s_mp_div_2d(q, pow); |
++ } |
++ |
++ if(r) |
++ *r = rem; |
++ |
++ return MP_OKAY; |
++ } |
++ |
++ if((res = mp_init_copy(&qp, a)) != MP_OKAY) |
++ return res; |
++ |
++ res = s_mp_div_d(&qp, d, &rem); |
++ |
++ if(s_mp_cmp_d(&qp, 0) == 0) |
++    SIGN(&qp) = ZPOS;
++ |
++ if(r) |
++ *r = rem; |
++ |
++ if(q) |
++ s_mp_exch(&qp, q); |
++ |
++ mp_clear(&qp); |
++ return res; |
++ |
++} /* end mp_div_d() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_div_2(a, c) */ |
++ |
++/* |
++ mp_div_2(a, c) |
++ |
++ Compute c = a / 2, disregarding the remainder. |
++ */ |
++ |
++mp_err mp_div_2(const mp_int *a, mp_int *c) |
++{ |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL && c != NULL, MP_BADARG); |
++ |
++ if((res = mp_copy(a, c)) != MP_OKAY) |
++ return res; |
++ |
++ s_mp_div_2(c); |
++ |
++ return MP_OKAY; |
++ |
++} /* end mp_div_2() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_expt_d(a, d, b) */ |
++ |
++mp_err mp_expt_d(const mp_int *a, mp_digit d, mp_int *c) |
++{ |
++ mp_int s, x; |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL && c != NULL, MP_BADARG); |
++ |
++ if((res = mp_init(&s)) != MP_OKAY) |
++ return res; |
++ if((res = mp_init_copy(&x, a)) != MP_OKAY) |
++ goto X; |
++ |
++ DIGIT(&s, 0) = 1; |
++ |
++ while(d != 0) { |
++ if(d & 1) { |
++ if((res = s_mp_mul(&s, &x)) != MP_OKAY) |
++ goto CLEANUP; |
++ } |
++ |
++ d /= 2; |
++ |
++ if((res = s_mp_sqr(&x)) != MP_OKAY) |
++ goto CLEANUP; |
++ } |
++ |
++ s_mp_exch(&s, c); |
++ |
++CLEANUP: |
++ mp_clear(&x); |
++X: |
++ mp_clear(&s); |
++ |
++ return res; |
++ |
++} /* end mp_expt_d() */ |
++ |
++/* }}} */ |
++ |
++/* }}} */ |
++ |
++/*------------------------------------------------------------------------*/ |
++/* {{{ Full arithmetic */ |
++ |
++/* {{{ mp_abs(a, b) */ |
++ |
++/* |
++ mp_abs(a, b) |
++ |
++ Compute b = |a|. 'a' and 'b' may be identical. |
++ */ |
++ |
++mp_err mp_abs(const mp_int *a, mp_int *b) |
++{ |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL && b != NULL, MP_BADARG); |
++ |
++ if((res = mp_copy(a, b)) != MP_OKAY) |
++ return res; |
++ |
++ SIGN(b) = ZPOS; |
++ |
++ return MP_OKAY; |
++ |
++} /* end mp_abs() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_neg(a, b) */ |
++ |
++/* |
++ mp_neg(a, b) |
++ |
++ Compute b = -a. 'a' and 'b' may be identical. |
++ */ |
++ |
++mp_err mp_neg(const mp_int *a, mp_int *b) |
++{ |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL && b != NULL, MP_BADARG); |
++ |
++ if((res = mp_copy(a, b)) != MP_OKAY) |
++ return res; |
++ |
++ if(s_mp_cmp_d(b, 0) == MP_EQ) |
++ SIGN(b) = ZPOS; |
++ else |
++ SIGN(b) = (SIGN(b) == NEG) ? ZPOS : NEG; |
++ |
++ return MP_OKAY; |
++ |
++} /* end mp_neg() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_add(a, b, c) */ |
++ |
++/* |
++ mp_add(a, b, c) |
++ |
++ Compute c = a + b. All parameters may be identical. |
++ */ |
++ |
++mp_err mp_add(const mp_int *a, const mp_int *b, mp_int *c) |
++{ |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL && b != NULL && c != NULL, MP_BADARG); |
++ |
++ if(SIGN(a) == SIGN(b)) { /* same sign: add values, keep sign */ |
++ MP_CHECKOK( s_mp_add_3arg(a, b, c) ); |
++ } else if(s_mp_cmp(a, b) >= 0) { /* different sign: |a| >= |b| */ |
++ MP_CHECKOK( s_mp_sub_3arg(a, b, c) ); |
++ } else { /* different sign: |a| < |b| */ |
++ MP_CHECKOK( s_mp_sub_3arg(b, a, c) ); |
++ } |
++ |
++ if (s_mp_cmp_d(c, 0) == MP_EQ) |
++ SIGN(c) = ZPOS; |
++ |
++CLEANUP: |
++ return res; |
++ |
++} /* end mp_add() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_sub(a, b, c) */ |
++ |
++/* |
++ mp_sub(a, b, c) |
++ |
++ Compute c = a - b. All parameters may be identical. |
++ */ |
++ |
++mp_err mp_sub(const mp_int *a, const mp_int *b, mp_int *c) |
++{ |
++ mp_err res; |
++ int magDiff; |
++ |
++ ARGCHK(a != NULL && b != NULL && c != NULL, MP_BADARG); |
++ |
++ if (a == b) { |
++ mp_zero(c); |
++ return MP_OKAY; |
++ } |
++ |
++ if (MP_SIGN(a) != MP_SIGN(b)) { |
++ MP_CHECKOK( s_mp_add_3arg(a, b, c) ); |
++ } else if (!(magDiff = s_mp_cmp(a, b))) { |
++ mp_zero(c); |
++ res = MP_OKAY; |
++ } else if (magDiff > 0) { |
++ MP_CHECKOK( s_mp_sub_3arg(a, b, c) ); |
++ } else { |
++ MP_CHECKOK( s_mp_sub_3arg(b, a, c) ); |
++ MP_SIGN(c) = !MP_SIGN(a); |
++ } |
++ |
++ if (s_mp_cmp_d(c, 0) == MP_EQ) |
++ MP_SIGN(c) = MP_ZPOS; |
++ |
++CLEANUP: |
++ return res; |
++ |
++} /* end mp_sub() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_mul(a, b, c) */ |
++ |
++/* |
++ mp_mul(a, b, c) |
++ |
++ Compute c = a * b. All parameters may be identical. |
++ */ |
++mp_err mp_mul(const mp_int *a, const mp_int *b, mp_int * c) |
++{ |
++ mp_digit *pb; |
++ mp_int tmp; |
++ mp_err res; |
++ mp_size ib; |
++ mp_size useda, usedb; |
++ |
++ ARGCHK(a != NULL && b != NULL && c != NULL, MP_BADARG); |
++ |
++ if (a == c) { |
++ if ((res = mp_init_copy(&tmp, a)) != MP_OKAY) |
++ return res; |
++ if (a == b) |
++ b = &tmp; |
++ a = &tmp; |
++ } else if (b == c) { |
++ if ((res = mp_init_copy(&tmp, b)) != MP_OKAY) |
++ return res; |
++ b = &tmp; |
++ } else { |
++ MP_DIGITS(&tmp) = 0; |
++ } |
++ |
++ if (MP_USED(a) < MP_USED(b)) { |
++ const mp_int *xch = b; /* switch a and b, to do fewer outer loops */ |
++ b = a; |
++ a = xch; |
++ } |
++ |
++ MP_USED(c) = 1; MP_DIGIT(c, 0) = 0; |
++ if((res = s_mp_pad(c, USED(a) + USED(b))) != MP_OKAY) |
++ goto CLEANUP; |
++ |
++#ifdef NSS_USE_COMBA |
++ if ((MP_USED(a) == MP_USED(b)) && IS_POWER_OF_2(MP_USED(b))) { |
++ if (MP_USED(a) == 4) { |
++ s_mp_mul_comba_4(a, b, c); |
++ goto CLEANUP; |
++ } |
++ if (MP_USED(a) == 8) { |
++ s_mp_mul_comba_8(a, b, c); |
++ goto CLEANUP; |
++ } |
++ if (MP_USED(a) == 16) { |
++ s_mp_mul_comba_16(a, b, c); |
++ goto CLEANUP; |
++ } |
++ if (MP_USED(a) == 32) { |
++ s_mp_mul_comba_32(a, b, c); |
++ goto CLEANUP; |
++ } |
++ } |
++#endif |
++ |
++ pb = MP_DIGITS(b); |
++ s_mpv_mul_d(MP_DIGITS(a), MP_USED(a), *pb++, MP_DIGITS(c)); |
++ |
++ /* Outer loop: Digits of b */ |
++ useda = MP_USED(a); |
++ usedb = MP_USED(b); |
++ for (ib = 1; ib < usedb; ib++) { |
++ mp_digit b_i = *pb++; |
++ |
++ /* Inner product: Digits of a */ |
++ if (b_i) |
++ s_mpv_mul_d_add(MP_DIGITS(a), useda, b_i, MP_DIGITS(c) + ib); |
++ else |
++ MP_DIGIT(c, ib + useda) = b_i; |
++ } |
++ |
++ s_mp_clamp(c); |
++ |
++ if(SIGN(a) == SIGN(b) || s_mp_cmp_d(c, 0) == MP_EQ) |
++ SIGN(c) = ZPOS; |
++ else |
++ SIGN(c) = NEG; |
++ |
++CLEANUP: |
++ mp_clear(&tmp); |
++ return res; |
++} /* end mp_mul() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_sqr(a, sqr) */ |
++ |
++#if MP_SQUARE |
++/* |
++ Computes the square of a. This can be done more |
++ efficiently than a general multiplication, because many of the |
++ computation steps are redundant when squaring. The inner product |
++ step is a bit more complicated, but we save a fair number of |
++ iterations of the multiplication loop. |
++ */ |
++ |
++/* sqr = a^2; Caller provides both a and tmp; */ |
++mp_err mp_sqr(const mp_int *a, mp_int *sqr) |
++{ |
++ mp_digit *pa; |
++ mp_digit d; |
++ mp_err res; |
++ mp_size ix; |
++ mp_int tmp; |
++ int count; |
++ |
++ ARGCHK(a != NULL && sqr != NULL, MP_BADARG); |
++ |
++ if (a == sqr) { |
++ if((res = mp_init_copy(&tmp, a)) != MP_OKAY) |
++ return res; |
++ a = &tmp; |
++ } else { |
++ DIGITS(&tmp) = 0; |
++ res = MP_OKAY; |
++ } |
++ |
++ ix = 2 * MP_USED(a); |
++ if (ix > MP_ALLOC(sqr)) { |
++ MP_USED(sqr) = 1; |
++ MP_CHECKOK( s_mp_grow(sqr, ix) ); |
++ } |
++ MP_USED(sqr) = ix; |
++ MP_DIGIT(sqr, 0) = 0; |
++ |
++#ifdef NSS_USE_COMBA |
++ if (IS_POWER_OF_2(MP_USED(a))) { |
++ if (MP_USED(a) == 4) { |
++ s_mp_sqr_comba_4(a, sqr); |
++ goto CLEANUP; |
++ } |
++ if (MP_USED(a) == 8) { |
++ s_mp_sqr_comba_8(a, sqr); |
++ goto CLEANUP; |
++ } |
++ if (MP_USED(a) == 16) { |
++ s_mp_sqr_comba_16(a, sqr); |
++ goto CLEANUP; |
++ } |
++ if (MP_USED(a) == 32) { |
++ s_mp_sqr_comba_32(a, sqr); |
++ goto CLEANUP; |
++ } |
++ } |
++#endif |
++ |
++ pa = MP_DIGITS(a); |
++ count = MP_USED(a) - 1; |
++ if (count > 0) { |
++ d = *pa++; |
++ s_mpv_mul_d(pa, count, d, MP_DIGITS(sqr) + 1); |
++ for (ix = 3; --count > 0; ix += 2) { |
++ d = *pa++; |
++ s_mpv_mul_d_add(pa, count, d, MP_DIGITS(sqr) + ix); |
++ } /* for(ix ...) */ |
++ MP_DIGIT(sqr, MP_USED(sqr)-1) = 0; /* above loop stopped short of this. */ |
++ |
++ /* now sqr *= 2 */ |
++ s_mp_mul_2(sqr); |
++ } else { |
++ MP_DIGIT(sqr, 1) = 0; |
++ } |
++ |
++ /* now add the squares of the digits of a to sqr. */ |
++ s_mpv_sqr_add_prop(MP_DIGITS(a), MP_USED(a), MP_DIGITS(sqr)); |
++ |
++ SIGN(sqr) = ZPOS; |
++ s_mp_clamp(sqr); |
++ |
++CLEANUP: |
++ mp_clear(&tmp); |
++ return res; |
++ |
++} /* end mp_sqr() */ |
++#endif |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_div(a, b, q, r) */ |
++ |
++/* |
++ mp_div(a, b, q, r) |
++ |
++ Compute q = a / b and r = a mod b. Input parameters may be re-used |
++ as output parameters. If q or r is NULL, that portion of the |
++ computation will be discarded (although it will still be computed) |
++ */ |
++mp_err mp_div(const mp_int *a, const mp_int *b, mp_int *q, mp_int *r) |
++{ |
++ mp_err res; |
++ mp_int *pQ, *pR; |
++ mp_int qtmp, rtmp, btmp; |
++ int cmp; |
++ mp_sign signA; |
++ mp_sign signB; |
++ |
++ ARGCHK(a != NULL && b != NULL, MP_BADARG); |
++ |
++ signA = MP_SIGN(a); |
++ signB = MP_SIGN(b); |
++ |
++ if(mp_cmp_z(b) == MP_EQ) |
++ return MP_RANGE; |
++ |
++ DIGITS(&qtmp) = 0; |
++ DIGITS(&rtmp) = 0; |
++ DIGITS(&btmp) = 0; |
++ |
++ /* Set up some temporaries... */ |
++ if (!r || r == a || r == b) { |
++ MP_CHECKOK( mp_init_copy(&rtmp, a) ); |
++ pR = &rtmp; |
++ } else { |
++ MP_CHECKOK( mp_copy(a, r) ); |
++ pR = r; |
++ } |
++ |
++ if (!q || q == a || q == b) { |
++ MP_CHECKOK( mp_init_size(&qtmp, MP_USED(a)) ); |
++ pQ = &qtmp; |
++ } else { |
++ MP_CHECKOK( s_mp_pad(q, MP_USED(a)) ); |
++ pQ = q; |
++ mp_zero(pQ); |
++ } |
++ |
++ /* |
++ If |a| <= |b|, we can compute the solution without division; |
++ otherwise, we actually do the work required. |
++ */ |
++ if ((cmp = s_mp_cmp(a, b)) <= 0) { |
++ if (cmp) { |
++ /* r was set to a above. */ |
++ mp_zero(pQ); |
++ } else { |
++ mp_set(pQ, 1); |
++ mp_zero(pR); |
++ } |
++ } else { |
++ MP_CHECKOK( mp_init_copy(&btmp, b) ); |
++ MP_CHECKOK( s_mp_div(pR, &btmp, pQ) ); |
++ } |
++ |
++ /* Compute the signs for the output */ |
++ MP_SIGN(pR) = signA; /* Sr = Sa */ |
++ /* Sq = ZPOS if Sa == Sb */ /* Sq = NEG if Sa != Sb */ |
++ MP_SIGN(pQ) = (signA == signB) ? ZPOS : NEG; |
++ |
++ if(s_mp_cmp_d(pQ, 0) == MP_EQ) |
++ SIGN(pQ) = ZPOS; |
++ if(s_mp_cmp_d(pR, 0) == MP_EQ) |
++ SIGN(pR) = ZPOS; |
++ |
++ /* Copy output, if it is needed */ |
++ if(q && q != pQ) |
++ s_mp_exch(pQ, q); |
++ |
++ if(r && r != pR) |
++ s_mp_exch(pR, r); |
++ |
++CLEANUP: |
++ mp_clear(&btmp); |
++ mp_clear(&rtmp); |
++ mp_clear(&qtmp); |
++ |
++ return res; |
++ |
++} /* end mp_div() */ |
++ |
++/* }}} */ |
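++
++/*
++  Illustrative usage sketch (editorial example, not part of the original
++  MPI sources).  It only exercises the mp_div() contract described above;
++  the variable names are hypothetical and error checking is omitted:
++
++      mp_int a, b, q, r;
++      mp_init(&a); mp_init(&b); mp_init(&q); mp_init(&r);
++      mp_set_int(&a, 17); mp_set_int(&b, 5);
++      mp_div(&a, &b, &q, &r);    -- q = 3, r = 2
++      mp_div(&a, &b, NULL, &r);  -- remainder only; the quotient is
++                                    computed internally but discarded
++      mp_clear(&a); mp_clear(&b); mp_clear(&q); mp_clear(&r);
++ */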
++ |
++/* {{{ mp_div_2d(a, d, q, r) */ |
++ |
++mp_err mp_div_2d(const mp_int *a, mp_digit d, mp_int *q, mp_int *r) |
++{ |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL, MP_BADARG); |
++ |
++ if(q) { |
++ if((res = mp_copy(a, q)) != MP_OKAY) |
++ return res; |
++ } |
++ if(r) { |
++ if((res = mp_copy(a, r)) != MP_OKAY) |
++ return res; |
++ } |
++ if(q) { |
++ s_mp_div_2d(q, d); |
++ } |
++ if(r) { |
++ s_mp_mod_2d(r, d); |
++ } |
++ |
++ return MP_OKAY; |
++ |
++} /* end mp_div_2d() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_expt(a, b, c) */ |
++ |
++/* |
++ mp_expt(a, b, c) |
++ |
++ Compute c = a ** b, that is, raise a to the b power. Uses a |
++ standard iterative square-and-multiply technique. |
++ */ |
++ |
++mp_err mp_expt(mp_int *a, mp_int *b, mp_int *c) |
++{ |
++ mp_int s, x; |
++ mp_err res; |
++ mp_digit d; |
++ int dig, bit; |
++ |
++ ARGCHK(a != NULL && b != NULL && c != NULL, MP_BADARG); |
++ |
++ if(mp_cmp_z(b) < 0) |
++ return MP_RANGE; |
++ |
++ if((res = mp_init(&s)) != MP_OKAY) |
++ return res; |
++ |
++ mp_set(&s, 1); |
++ |
++ if((res = mp_init_copy(&x, a)) != MP_OKAY) |
++ goto X; |
++ |
++ /* Loop over low-order digits in ascending order */ |
++ for(dig = 0; dig < (USED(b) - 1); dig++) { |
++ d = DIGIT(b, dig); |
++ |
++ /* Loop over bits of each non-maximal digit */ |
++ for(bit = 0; bit < DIGIT_BIT; bit++) { |
++ if(d & 1) { |
++ if((res = s_mp_mul(&s, &x)) != MP_OKAY) |
++ goto CLEANUP; |
++ } |
++ |
++ d >>= 1; |
++ |
++ if((res = s_mp_sqr(&x)) != MP_OKAY) |
++ goto CLEANUP; |
++ } |
++ } |
++ |
++ /* Consider now the last digit... */ |
++ d = DIGIT(b, dig); |
++ |
++ while(d) { |
++ if(d & 1) { |
++ if((res = s_mp_mul(&s, &x)) != MP_OKAY) |
++ goto CLEANUP; |
++ } |
++ |
++ d >>= 1; |
++ |
++ if((res = s_mp_sqr(&x)) != MP_OKAY) |
++ goto CLEANUP; |
++ } |
++ |
++ if(mp_iseven(b)) |
++ SIGN(&s) = SIGN(a); |
++ |
++ res = mp_copy(&s, c); |
++ |
++CLEANUP: |
++ mp_clear(&x); |
++X: |
++ mp_clear(&s); |
++ |
++ return res; |
++ |
++} /* end mp_expt() */ |
++ |
++/* }}} */ |
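++
++/*
++  Worked example of the square-and-multiply loop above (editorial note,
++  not part of the original MPI sources).  To compute a^13, the exponent
++  13 = 1101b is scanned from the low bit upward:
++
++      bit 0 = 1:  s = a          x = a^2
++      bit 1 = 0:  s unchanged    x = a^4
++      bit 2 = 1:  s = a^5        x = a^8
++      bit 3 = 1:  s = a^13
++
++  i.e. s accumulates a^(1+4+8) = a^13, with one squaring per exponent
++  bit and one multiplication per set bit.
++ */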
++ |
++/* {{{ mp_2expt(a, k) */ |
++ |
++/* Compute a = 2^k */ |
++ |
++mp_err mp_2expt(mp_int *a, mp_digit k) |
++{ |
++ ARGCHK(a != NULL, MP_BADARG); |
++ |
++ return s_mp_2expt(a, k); |
++ |
++} /* end mp_2expt() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_mod(a, m, c) */ |
++ |
++/* |
++ mp_mod(a, m, c) |
++ |
++ Compute c = a (mod m). Result will always be 0 <= c < m. |
++ */ |
++ |
++mp_err mp_mod(const mp_int *a, const mp_int *m, mp_int *c) |
++{ |
++ mp_err res; |
++ int mag; |
++ |
++ ARGCHK(a != NULL && m != NULL && c != NULL, MP_BADARG); |
++ |
++ if(SIGN(m) == NEG) |
++ return MP_RANGE; |
++ |
++ /* |
++ If |a| > m, we need to divide to get the remainder and take the |
++ absolute value. |
++ |
++ If |a| < m, we don't need to do any division, just copy and adjust |
++ the sign (if a is negative). |
++ |
++ If |a| == m, we can simply set the result to zero. |
++ |
++ This order is intended to minimize the average path length of the |
++ comparison chain on common workloads -- the most frequent cases are |
++ that |a| != m, so we do those first. |
++ */ |
++ if((mag = s_mp_cmp(a, m)) > 0) { |
++ if((res = mp_div(a, m, NULL, c)) != MP_OKAY) |
++ return res; |
++ |
++ if(SIGN(c) == NEG) { |
++ if((res = mp_add(c, m, c)) != MP_OKAY) |
++ return res; |
++ } |
++ |
++ } else if(mag < 0) { |
++ if((res = mp_copy(a, c)) != MP_OKAY) |
++ return res; |
++ |
++ if(mp_cmp_z(a) < 0) { |
++ if((res = mp_add(c, m, c)) != MP_OKAY) |
++ return res; |
++ |
++ } |
++ |
++ } else { |
++ mp_zero(c); |
++ |
++ } |
++ |
++ return MP_OKAY; |
++ |
++} /* end mp_mod() */ |
++ |
++/* }}} */
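++
++/*
++  Small worked examples for the three cases above (editorial note, not
++  part of the original MPI sources):
++
++      a = -7, m = 5:  |a| > m, divide: remainder -2, then add m  ->  c = 3
++      a = -3, m = 5:  |a| < m, copy a, then add m since a < 0    ->  c = 2
++      a =  5, m = 5:  |a| = m                                    ->  c = 0
++ */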
++ |
++/* {{{ mp_mod_d(a, d, c) */ |
++ |
++/* |
++ mp_mod_d(a, d, c) |
++ |
++ Compute c = a (mod d). Result will always be 0 <= c < d |
++ */ |
++mp_err mp_mod_d(const mp_int *a, mp_digit d, mp_digit *c) |
++{ |
++ mp_err res; |
++ mp_digit rem; |
++ |
++ ARGCHK(a != NULL && c != NULL, MP_BADARG); |
++ |
++  /* use division whenever |a| >= d, so that |a| == d yields remainder 0 */
++  if(s_mp_cmp_d(a, d) >= 0) {
++ if((res = mp_div_d(a, d, NULL, &rem)) != MP_OKAY) |
++ return res; |
++ |
++ } else { |
++ if(SIGN(a) == NEG) |
++ rem = d - DIGIT(a, 0); |
++ else |
++ rem = DIGIT(a, 0); |
++ } |
++ |
++ if(c) |
++ *c = rem; |
++ |
++ return MP_OKAY; |
++ |
++} /* end mp_mod_d() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_sqrt(a, b) */ |
++ |
++/* |
++ mp_sqrt(a, b) |
++ |
++ Compute the integer square root of a, and store the result in b. |
++ Uses an integer-arithmetic version of Newton's iterative linear |
++ approximation technique to determine this value; the result has the |
++ following two properties: |
++ |
++ b^2 <= a |
++    (b+1)^2 > a
++ |
++ It is a range error to pass a negative value. |
++ */ |
++mp_err mp_sqrt(const mp_int *a, mp_int *b) |
++{ |
++ mp_int x, t; |
++ mp_err res; |
++ mp_size used; |
++ |
++ ARGCHK(a != NULL && b != NULL, MP_BADARG); |
++ |
++ /* Cannot take square root of a negative value */ |
++ if(SIGN(a) == NEG) |
++ return MP_RANGE; |
++ |
++ /* Special cases for zero and one, trivial */ |
++ if(mp_cmp_d(a, 1) <= 0) |
++ return mp_copy(a, b); |
++ |
++ /* Initialize the temporaries we'll use below */ |
++ if((res = mp_init_size(&t, USED(a))) != MP_OKAY) |
++ return res; |
++ |
++ /* Compute an initial guess for the iteration as a itself */ |
++ if((res = mp_init_copy(&x, a)) != MP_OKAY) |
++ goto X; |
++ |
++ used = MP_USED(&x); |
++ if (used > 1) { |
++ s_mp_rshd(&x, used / 2); |
++ } |
++ |
++ for(;;) { |
++ /* t = (x * x) - a */ |
++ mp_copy(&x, &t); /* can't fail, t is big enough for original x */ |
++ if((res = mp_sqr(&t, &t)) != MP_OKAY || |
++ (res = mp_sub(&t, a, &t)) != MP_OKAY) |
++ goto CLEANUP; |
++ |
++ /* t = t / 2x */ |
++ s_mp_mul_2(&x); |
++ if((res = mp_div(&t, &x, &t, NULL)) != MP_OKAY) |
++ goto CLEANUP; |
++ s_mp_div_2(&x); |
++ |
++ /* Terminate the loop, if the quotient is zero */ |
++ if(mp_cmp_z(&t) == MP_EQ) |
++ break; |
++ |
++ /* x = x - t */ |
++ if((res = mp_sub(&x, &t, &x)) != MP_OKAY) |
++ goto CLEANUP; |
++ |
++ } |
++ |
++ /* Copy result to output parameter */ |
++ mp_sub_d(&x, 1, &x); |
++ s_mp_exch(&x, b); |
++ |
++ CLEANUP: |
++ mp_clear(&x); |
++ X: |
++ mp_clear(&t); |
++ |
++ return res; |
++ |
++} /* end mp_sqrt() */ |
++ |
++/* }}} */ |
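++
++/*
++  Worked example of the iteration above (editorial note, not part of the
++  original MPI sources).  For a = 10, starting from x = 10:
++
++      t = (100 - 10) / 20 = 4   ->  x = 6
++      t = ( 36 - 10) / 12 = 2   ->  x = 4
++      t = ( 16 - 10) /  8 = 0   ->  stop
++
++  and x - 1 = 3 is returned; indeed 3^2 = 9 <= 10 and 4^2 = 16 > 10,
++  the integer square root of 10.
++ */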
++ |
++/* }}} */ |
++ |
++/*------------------------------------------------------------------------*/ |
++/* {{{ Modular arithmetic */ |
++ |
++#if MP_MODARITH |
++/* {{{ mp_addmod(a, b, m, c) */ |
++ |
++/* |
++ mp_addmod(a, b, m, c) |
++ |
++ Compute c = (a + b) mod m |
++ */ |
++ |
++mp_err mp_addmod(const mp_int *a, const mp_int *b, const mp_int *m, mp_int *c) |
++{ |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL && b != NULL && m != NULL && c != NULL, MP_BADARG); |
++ |
++ if((res = mp_add(a, b, c)) != MP_OKAY) |
++ return res; |
++ if((res = mp_mod(c, m, c)) != MP_OKAY) |
++ return res; |
++ |
++ return MP_OKAY; |
++ |
++} |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_submod(a, b, m, c) */ |
++ |
++/* |
++ mp_submod(a, b, m, c) |
++ |
++ Compute c = (a - b) mod m |
++ */ |
++ |
++mp_err mp_submod(const mp_int *a, const mp_int *b, const mp_int *m, mp_int *c) |
++{ |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL && b != NULL && m != NULL && c != NULL, MP_BADARG); |
++ |
++ if((res = mp_sub(a, b, c)) != MP_OKAY) |
++ return res; |
++ if((res = mp_mod(c, m, c)) != MP_OKAY) |
++ return res; |
++ |
++ return MP_OKAY; |
++ |
++} |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_mulmod(a, b, m, c) */ |
++ |
++/* |
++ mp_mulmod(a, b, m, c) |
++ |
++ Compute c = (a * b) mod m |
++ */ |
++ |
++mp_err mp_mulmod(const mp_int *a, const mp_int *b, const mp_int *m, mp_int *c) |
++{ |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL && b != NULL && m != NULL && c != NULL, MP_BADARG); |
++ |
++ if((res = mp_mul(a, b, c)) != MP_OKAY) |
++ return res; |
++ if((res = mp_mod(c, m, c)) != MP_OKAY) |
++ return res; |
++ |
++ return MP_OKAY; |
++ |
++} |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_sqrmod(a, m, c) */ |
++ |
++#if MP_SQUARE |
++mp_err mp_sqrmod(const mp_int *a, const mp_int *m, mp_int *c) |
++{ |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL && m != NULL && c != NULL, MP_BADARG); |
++ |
++ if((res = mp_sqr(a, c)) != MP_OKAY) |
++ return res; |
++ if((res = mp_mod(c, m, c)) != MP_OKAY) |
++ return res; |
++ |
++ return MP_OKAY; |
++ |
++} /* end mp_sqrmod() */ |
++#endif |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_exptmod(a, b, m, c) */ |
++ |
++/* |
++ s_mp_exptmod(a, b, m, c) |
++ |
++ Compute c = (a ** b) mod m. Uses a standard square-and-multiply |
++ method with modular reductions at each step. (This is basically the |
++ same code as mp_expt(), except for the addition of the reductions) |
++ |
++ The modular reductions are done using Barrett's algorithm (see |
++ s_mp_reduce() below for details) |
++ */ |
++ |
++mp_err s_mp_exptmod(const mp_int *a, const mp_int *b, const mp_int *m, mp_int *c) |
++{ |
++ mp_int s, x, mu; |
++ mp_err res; |
++ mp_digit d; |
++ int dig, bit; |
++ |
++ ARGCHK(a != NULL && b != NULL && c != NULL, MP_BADARG); |
++ |
++ if(mp_cmp_z(b) < 0 || mp_cmp_z(m) <= 0) |
++ return MP_RANGE; |
++ |
++ if((res = mp_init(&s)) != MP_OKAY) |
++ return res; |
++ if((res = mp_init_copy(&x, a)) != MP_OKAY || |
++ (res = mp_mod(&x, m, &x)) != MP_OKAY) |
++ goto X; |
++ if((res = mp_init(&mu)) != MP_OKAY) |
++ goto MU; |
++ |
++ mp_set(&s, 1); |
++ |
++ /* mu = b^2k / m */ |
++ s_mp_add_d(&mu, 1); |
++ s_mp_lshd(&mu, 2 * USED(m)); |
++ if((res = mp_div(&mu, m, &mu, NULL)) != MP_OKAY) |
++ goto CLEANUP; |
++ |
++ /* Loop over digits of b in ascending order, except highest order */ |
++ for(dig = 0; dig < (USED(b) - 1); dig++) { |
++ d = DIGIT(b, dig); |
++ |
++ /* Loop over the bits of the lower-order digits */ |
++ for(bit = 0; bit < DIGIT_BIT; bit++) { |
++ if(d & 1) { |
++ if((res = s_mp_mul(&s, &x)) != MP_OKAY) |
++ goto CLEANUP; |
++ if((res = s_mp_reduce(&s, m, &mu)) != MP_OKAY) |
++ goto CLEANUP; |
++ } |
++ |
++ d >>= 1; |
++ |
++ if((res = s_mp_sqr(&x)) != MP_OKAY) |
++ goto CLEANUP; |
++ if((res = s_mp_reduce(&x, m, &mu)) != MP_OKAY) |
++ goto CLEANUP; |
++ } |
++ } |
++ |
++ /* Now do the last digit... */ |
++ d = DIGIT(b, dig); |
++ |
++ while(d) { |
++ if(d & 1) { |
++ if((res = s_mp_mul(&s, &x)) != MP_OKAY) |
++ goto CLEANUP; |
++ if((res = s_mp_reduce(&s, m, &mu)) != MP_OKAY) |
++ goto CLEANUP; |
++ } |
++ |
++ d >>= 1; |
++ |
++ if((res = s_mp_sqr(&x)) != MP_OKAY) |
++ goto CLEANUP; |
++ if((res = s_mp_reduce(&x, m, &mu)) != MP_OKAY) |
++ goto CLEANUP; |
++ } |
++ |
++ s_mp_exch(&s, c); |
++ |
++ CLEANUP: |
++ mp_clear(&mu); |
++ MU: |
++ mp_clear(&x); |
++ X: |
++ mp_clear(&s); |
++ |
++ return res; |
++ |
++} /* end s_mp_exptmod() */ |
++ |
++/* }}} */ |
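++
++/*
++  Editorial note (not part of the original MPI sources): with k = USED(m)
++  and b = RADIX, the precomputed value is mu = floor(b^(2k) / m).  Standard
++  Barrett reduction of an x < b^(2k) then estimates the quotient as
++
++      q = floor( floor(x / b^(k-1)) * mu / b^(k+1) )
++
++  and takes x - q*m, followed by at most two corrective subtractions of m.
++  Decimal illustration with b = 10, m = 87 (k = 2): mu = floor(10^4 / 87)
++  = 114; for x = 5731, q = floor(573 * 114 / 1000) = 65 and
++  x - q*m = 5731 - 5655 = 76 = 5731 mod 87.  See s_mp_reduce() below for
++  the exact form used here.
++ */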
++ |
++/* {{{ mp_exptmod_d(a, d, m, c) */ |
++ |
++mp_err mp_exptmod_d(const mp_int *a, mp_digit d, const mp_int *m, mp_int *c) |
++{ |
++ mp_int s, x; |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL && c != NULL, MP_BADARG); |
++ |
++ if((res = mp_init(&s)) != MP_OKAY) |
++ return res; |
++ if((res = mp_init_copy(&x, a)) != MP_OKAY) |
++ goto X; |
++ |
++ mp_set(&s, 1); |
++ |
++ while(d != 0) { |
++ if(d & 1) { |
++ if((res = s_mp_mul(&s, &x)) != MP_OKAY || |
++ (res = mp_mod(&s, m, &s)) != MP_OKAY) |
++ goto CLEANUP; |
++ } |
++ |
++ d /= 2; |
++ |
++ if((res = s_mp_sqr(&x)) != MP_OKAY || |
++ (res = mp_mod(&x, m, &x)) != MP_OKAY) |
++ goto CLEANUP; |
++ } |
++ |
++ s_mp_exch(&s, c); |
++ |
++CLEANUP: |
++ mp_clear(&x); |
++X: |
++ mp_clear(&s); |
++ |
++ return res; |
++ |
++} /* end mp_exptmod_d() */ |
++ |
++/* }}} */ |
++#endif /* if MP_MODARITH */ |
++ |
++/* }}} */ |
++ |
++/*------------------------------------------------------------------------*/ |
++/* {{{ Comparison functions */ |
++ |
++/* {{{ mp_cmp_z(a) */ |
++ |
++/* |
++ mp_cmp_z(a) |
++ |
++ Compare a <=> 0. Returns <0 if a<0, 0 if a=0, >0 if a>0. |
++ */ |
++ |
++int mp_cmp_z(const mp_int *a) |
++{ |
++ if(SIGN(a) == NEG) |
++ return MP_LT; |
++ else if(USED(a) == 1 && DIGIT(a, 0) == 0) |
++ return MP_EQ; |
++ else |
++ return MP_GT; |
++ |
++} /* end mp_cmp_z() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_cmp_d(a, d) */ |
++ |
++/* |
++ mp_cmp_d(a, d) |
++ |
++ Compare a <=> d. Returns <0 if a<d, 0 if a=d, >0 if a>d |
++ */ |
++ |
++int mp_cmp_d(const mp_int *a, mp_digit d) |
++{ |
++ ARGCHK(a != NULL, MP_EQ); |
++ |
++ if(SIGN(a) == NEG) |
++ return MP_LT; |
++ |
++ return s_mp_cmp_d(a, d); |
++ |
++} /* end mp_cmp_d() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_cmp(a, b) */ |
++ |
++int mp_cmp(const mp_int *a, const mp_int *b) |
++{ |
++ ARGCHK(a != NULL && b != NULL, MP_EQ); |
++ |
++ if(SIGN(a) == SIGN(b)) { |
++ int mag; |
++ |
++ if((mag = s_mp_cmp(a, b)) == MP_EQ) |
++ return MP_EQ; |
++ |
++ if(SIGN(a) == ZPOS) |
++ return mag; |
++ else |
++ return -mag; |
++ |
++ } else if(SIGN(a) == ZPOS) { |
++ return MP_GT; |
++ } else { |
++ return MP_LT; |
++ } |
++ |
++} /* end mp_cmp() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_cmp_mag(a, b) */ |
++ |
++/* |
++ mp_cmp_mag(a, b) |
++ |
++ Compares |a| <=> |b|, and returns an appropriate comparison result |
++ */ |
++ |
++int mp_cmp_mag(mp_int *a, mp_int *b) |
++{ |
++ ARGCHK(a != NULL && b != NULL, MP_EQ); |
++ |
++ return s_mp_cmp(a, b); |
++ |
++} /* end mp_cmp_mag() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_cmp_int(a, z) */ |
++ |
++/* |
++ This just converts z to an mp_int, and uses the existing comparison |
++ routines. This is sort of inefficient, but it's not clear to me how |
++  frequently this will get used anyway.  For small positive constants,
++ you can always use mp_cmp_d(), and for zero, there is mp_cmp_z(). |
++ */ |
++int mp_cmp_int(const mp_int *a, long z) |
++{ |
++ mp_int tmp; |
++ int out; |
++ |
++ ARGCHK(a != NULL, MP_EQ); |
++ |
++ mp_init(&tmp); mp_set_int(&tmp, z); |
++ out = mp_cmp(a, &tmp); |
++ mp_clear(&tmp); |
++ |
++ return out; |
++ |
++} /* end mp_cmp_int() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_isodd(a) */ |
++ |
++/* |
++ mp_isodd(a) |
++ |
++ Returns a true (non-zero) value if a is odd, false (zero) otherwise. |
++ */ |
++int mp_isodd(const mp_int *a) |
++{ |
++ ARGCHK(a != NULL, 0); |
++ |
++ return (int)(DIGIT(a, 0) & 1); |
++ |
++} /* end mp_isodd() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_iseven(a) */ |
++ |
++int mp_iseven(const mp_int *a) |
++{ |
++ return !mp_isodd(a); |
++ |
++} /* end mp_iseven() */ |
++ |
++/* }}} */ |
++ |
++/* }}} */ |
++ |
++/*------------------------------------------------------------------------*/ |
++/* {{{ Number theoretic functions */ |
++ |
++#if MP_NUMTH |
++/* {{{ mp_gcd(a, b, c) */ |
++ |
++/* |
++  Like the old mp_gcd() function, except that it computes the GCD using the
++ binary algorithm due to Josef Stein in 1961 (via Knuth). |
++ */ |
++mp_err mp_gcd(mp_int *a, mp_int *b, mp_int *c) |
++{ |
++ mp_err res; |
++ mp_int u, v, t; |
++ mp_size k = 0; |
++ |
++ ARGCHK(a != NULL && b != NULL && c != NULL, MP_BADARG); |
++ |
++ if(mp_cmp_z(a) == MP_EQ && mp_cmp_z(b) == MP_EQ) |
++ return MP_RANGE; |
++ if(mp_cmp_z(a) == MP_EQ) { |
++ return mp_copy(b, c); |
++ } else if(mp_cmp_z(b) == MP_EQ) { |
++ return mp_copy(a, c); |
++ } |
++ |
++ if((res = mp_init(&t)) != MP_OKAY) |
++ return res; |
++ if((res = mp_init_copy(&u, a)) != MP_OKAY) |
++ goto U; |
++ if((res = mp_init_copy(&v, b)) != MP_OKAY) |
++ goto V; |
++ |
++ SIGN(&u) = ZPOS; |
++ SIGN(&v) = ZPOS; |
++ |
++  /* Divide out common factors of 2 until at least one of u, v is odd */
++ while(mp_iseven(&u) && mp_iseven(&v)) { |
++ s_mp_div_2(&u); |
++ s_mp_div_2(&v); |
++ ++k; |
++ } |
++ |
++ /* Initialize t */ |
++ if(mp_isodd(&u)) { |
++ if((res = mp_copy(&v, &t)) != MP_OKAY) |
++ goto CLEANUP; |
++ |
++ /* t = -v */ |
++ if(SIGN(&v) == ZPOS) |
++ SIGN(&t) = NEG; |
++ else |
++ SIGN(&t) = ZPOS; |
++ |
++ } else { |
++ if((res = mp_copy(&u, &t)) != MP_OKAY) |
++ goto CLEANUP; |
++ |
++ } |
++ |
++ for(;;) { |
++ while(mp_iseven(&t)) { |
++ s_mp_div_2(&t); |
++ } |
++ |
++ if(mp_cmp_z(&t) == MP_GT) { |
++ if((res = mp_copy(&t, &u)) != MP_OKAY) |
++ goto CLEANUP; |
++ |
++ } else { |
++ if((res = mp_copy(&t, &v)) != MP_OKAY) |
++ goto CLEANUP; |
++ |
++ /* v = -t */ |
++ if(SIGN(&t) == ZPOS) |
++ SIGN(&v) = NEG; |
++ else |
++ SIGN(&v) = ZPOS; |
++ } |
++ |
++ if((res = mp_sub(&u, &v, &t)) != MP_OKAY) |
++ goto CLEANUP; |
++ |
++ if(s_mp_cmp_d(&t, 0) == MP_EQ) |
++ break; |
++ } |
++ |
++ s_mp_2expt(&v, k); /* v = 2^k */ |
++ res = mp_mul(&u, &v, c); /* c = u * v */ |
++ |
++ CLEANUP: |
++ mp_clear(&v); |
++ V: |
++ mp_clear(&u); |
++ U: |
++ mp_clear(&t); |
++ |
++ return res; |
++ |
++} /* end mp_gcd() */ |
++ |
++/* }}} */ |
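++
++/*
++  Worked example (editorial note, not part of the original MPI sources):
++  for gcd(12, 18) the loop above first pulls out the shared factor of
++  two (k = 1), leaving the pair (6, 9).  The subtract-and-halve phase
++  then reduces that pair to its gcd, 3, so the result is 2^k * 3 = 6.
++ */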
++ |
++/* {{{ mp_lcm(a, b, c) */ |
++ |
++/* We compute the least common multiple using the rule: |
++ |
++ ab = [a, b](a, b) |
++ |
++ ... by computing the product, and dividing out the gcd. |
++ */ |
++ |
++mp_err mp_lcm(mp_int *a, mp_int *b, mp_int *c) |
++{ |
++ mp_int gcd, prod; |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL && b != NULL && c != NULL, MP_BADARG); |
++ |
++ /* Set up temporaries */ |
++ if((res = mp_init(&gcd)) != MP_OKAY) |
++ return res; |
++ if((res = mp_init(&prod)) != MP_OKAY) |
++ goto GCD; |
++ |
++ if((res = mp_mul(a, b, &prod)) != MP_OKAY) |
++ goto CLEANUP; |
++ if((res = mp_gcd(a, b, &gcd)) != MP_OKAY) |
++ goto CLEANUP; |
++ |
++ res = mp_div(&prod, &gcd, c, NULL); |
++ |
++ CLEANUP: |
++ mp_clear(&prod); |
++ GCD: |
++ mp_clear(&gcd); |
++ |
++ return res; |
++ |
++} /* end mp_lcm() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_xgcd(a, b, g, x, y) */ |
++ |
++/* |
++ mp_xgcd(a, b, g, x, y) |
++ |
++ Compute g = (a, b) and values x and y satisfying Bezout's identity |
++ (that is, ax + by = g). This uses the binary extended GCD algorithm |
++  based on the Stein algorithm used for mp_gcd().
++  See algorithm 14.61 in the Handbook of Applied Cryptography.
++ */ |
++ |
++mp_err mp_xgcd(const mp_int *a, const mp_int *b, mp_int *g, mp_int *x, mp_int *y) |
++{ |
++ mp_int gx, xc, yc, u, v, A, B, C, D; |
++ mp_int *clean[9]; |
++ mp_err res; |
++ int last = -1; |
++ |
++ if(mp_cmp_z(b) == 0) |
++ return MP_RANGE; |
++ |
++ /* Initialize all these variables we need */ |
++ MP_CHECKOK( mp_init(&u) ); |
++ clean[++last] = &u; |
++ MP_CHECKOK( mp_init(&v) ); |
++ clean[++last] = &v; |
++ MP_CHECKOK( mp_init(&gx) ); |
++ clean[++last] = &gx; |
++ MP_CHECKOK( mp_init(&A) ); |
++ clean[++last] = &A; |
++ MP_CHECKOK( mp_init(&B) ); |
++ clean[++last] = &B; |
++ MP_CHECKOK( mp_init(&C) ); |
++ clean[++last] = &C; |
++ MP_CHECKOK( mp_init(&D) ); |
++ clean[++last] = &D; |
++ MP_CHECKOK( mp_init_copy(&xc, a) ); |
++ clean[++last] = &xc; |
++ mp_abs(&xc, &xc); |
++ MP_CHECKOK( mp_init_copy(&yc, b) ); |
++ clean[++last] = &yc; |
++ mp_abs(&yc, &yc); |
++ |
++ mp_set(&gx, 1); |
++ |
++ /* Divide by two until at least one of them is odd */ |
++ while(mp_iseven(&xc) && mp_iseven(&yc)) { |
++ mp_size nx = mp_trailing_zeros(&xc); |
++ mp_size ny = mp_trailing_zeros(&yc); |
++ mp_size n = MP_MIN(nx, ny); |
++ s_mp_div_2d(&xc,n); |
++ s_mp_div_2d(&yc,n); |
++ MP_CHECKOK( s_mp_mul_2d(&gx,n) ); |
++ } |
++ |
++ mp_copy(&xc, &u); |
++ mp_copy(&yc, &v); |
++ mp_set(&A, 1); mp_set(&D, 1); |
++ |
++ /* Loop through binary GCD algorithm */ |
++ do { |
++ while(mp_iseven(&u)) { |
++ s_mp_div_2(&u); |
++ |
++ if(mp_iseven(&A) && mp_iseven(&B)) { |
++ s_mp_div_2(&A); s_mp_div_2(&B); |
++ } else { |
++ MP_CHECKOK( mp_add(&A, &yc, &A) ); |
++ s_mp_div_2(&A); |
++ MP_CHECKOK( mp_sub(&B, &xc, &B) ); |
++ s_mp_div_2(&B); |
++ } |
++ } |
++ |
++ while(mp_iseven(&v)) { |
++ s_mp_div_2(&v); |
++ |
++ if(mp_iseven(&C) && mp_iseven(&D)) { |
++ s_mp_div_2(&C); s_mp_div_2(&D); |
++ } else { |
++ MP_CHECKOK( mp_add(&C, &yc, &C) ); |
++ s_mp_div_2(&C); |
++ MP_CHECKOK( mp_sub(&D, &xc, &D) ); |
++ s_mp_div_2(&D); |
++ } |
++ } |
++ |
++ if(mp_cmp(&u, &v) >= 0) { |
++ MP_CHECKOK( mp_sub(&u, &v, &u) ); |
++ MP_CHECKOK( mp_sub(&A, &C, &A) ); |
++ MP_CHECKOK( mp_sub(&B, &D, &B) ); |
++ } else { |
++ MP_CHECKOK( mp_sub(&v, &u, &v) ); |
++ MP_CHECKOK( mp_sub(&C, &A, &C) ); |
++ MP_CHECKOK( mp_sub(&D, &B, &D) ); |
++ } |
++ } while (mp_cmp_z(&u) != 0); |
++ |
++ /* copy results to output */ |
++ if(x) |
++ MP_CHECKOK( mp_copy(&C, x) ); |
++ |
++ if(y) |
++ MP_CHECKOK( mp_copy(&D, y) ); |
++ |
++ if(g) |
++ MP_CHECKOK( mp_mul(&gx, &v, g) ); |
++ |
++ CLEANUP: |
++ while(last >= 0) |
++ mp_clear(clean[last--]); |
++ |
++ return res; |
++ |
++} /* end mp_xgcd() */ |
++ |
++/* }}} */ |
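++
++/*
++  Editorial note (not part of the original MPI sources): for a = 12,
++  b = 18 the routine yields g = 6 together with some x, y satisfying
++  Bezout's identity ax + by = g; one such pair is x = -1, y = 1, since
++  12*(-1) + 18*1 = 6.
++ */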
++ |
++mp_size mp_trailing_zeros(const mp_int *mp) |
++{ |
++ mp_digit d; |
++ mp_size n = 0; |
++ int ix; |
++ |
++ if (!mp || !MP_DIGITS(mp) || !mp_cmp_z(mp)) |
++ return n; |
++ |
++ for (ix = 0; !(d = MP_DIGIT(mp,ix)) && (ix < MP_USED(mp)); ++ix) |
++ n += MP_DIGIT_BIT; |
++ if (!d) |
++ return 0; /* shouldn't happen, but ... */ |
++#if !defined(MP_USE_UINT_DIGIT) |
++ if (!(d & 0xffffffffU)) { |
++ d >>= 32; |
++ n += 32; |
++ } |
++#endif |
++ if (!(d & 0xffffU)) { |
++ d >>= 16; |
++ n += 16; |
++ } |
++ if (!(d & 0xffU)) { |
++ d >>= 8; |
++ n += 8; |
++ } |
++ if (!(d & 0xfU)) { |
++ d >>= 4; |
++ n += 4; |
++ } |
++ if (!(d & 0x3U)) { |
++ d >>= 2; |
++ n += 2; |
++ } |
++ if (!(d & 0x1U)) { |
++ d >>= 1; |
++ n += 1; |
++ } |
++#if MP_ARGCHK == 2 |
++ assert(0 != (d & 1)); |
++#endif |
++ return n; |
++} |
++ |
++/* Given a and prime p, computes c and k such that a*c == 2**k (mod p). |
++** Returns k (positive) or error (negative). |
++** This technique is from the paper "Fast Modular Reciprocals" (unpublished)
++** by Richard Schroeppel (a.k.a. Captain Nemo). |
++*/ |
++mp_err s_mp_almost_inverse(const mp_int *a, const mp_int *p, mp_int *c) |
++{ |
++ mp_err res; |
++ mp_err k = 0; |
++ mp_int d, f, g; |
++ |
++ ARGCHK(a && p && c, MP_BADARG); |
++ |
++ MP_DIGITS(&d) = 0; |
++ MP_DIGITS(&f) = 0; |
++ MP_DIGITS(&g) = 0; |
++ MP_CHECKOK( mp_init(&d) ); |
++ MP_CHECKOK( mp_init_copy(&f, a) ); /* f = a */ |
++ MP_CHECKOK( mp_init_copy(&g, p) ); /* g = p */ |
++ |
++ mp_set(c, 1); |
++ mp_zero(&d); |
++ |
++ if (mp_cmp_z(&f) == 0) { |
++ res = MP_UNDEF; |
++ } else |
++ for (;;) { |
++ int diff_sign; |
++ while (mp_iseven(&f)) { |
++ mp_size n = mp_trailing_zeros(&f); |
++ if (!n) { |
++ res = MP_UNDEF; |
++ goto CLEANUP; |
++ } |
++ s_mp_div_2d(&f, n); |
++ MP_CHECKOK( s_mp_mul_2d(&d, n) ); |
++ k += n; |
++ } |
++ if (mp_cmp_d(&f, 1) == MP_EQ) { /* f == 1 */ |
++ res = k; |
++ break; |
++ } |
++ diff_sign = mp_cmp(&f, &g); |
++ if (diff_sign < 0) { /* f < g */ |
++ s_mp_exch(&f, &g); |
++ s_mp_exch(c, &d); |
++ } else if (diff_sign == 0) { /* f == g */ |
++ res = MP_UNDEF; /* a and p are not relatively prime */ |
++ break; |
++ } |
++ if ((MP_DIGIT(&f,0) % 4) == (MP_DIGIT(&g,0) % 4)) { |
++ MP_CHECKOK( mp_sub(&f, &g, &f) ); /* f = f - g */ |
++ MP_CHECKOK( mp_sub(c, &d, c) ); /* c = c - d */ |
++ } else { |
++ MP_CHECKOK( mp_add(&f, &g, &f) ); /* f = f + g */ |
++ MP_CHECKOK( mp_add(c, &d, c) ); /* c = c + d */ |
++ } |
++ } |
++ if (res >= 0) { |
++ while (MP_SIGN(c) != MP_ZPOS) { |
++ MP_CHECKOK( mp_add(c, p, c) ); |
++ } |
++ res = k; |
++ } |
++ |
++CLEANUP: |
++ mp_clear(&d); |
++ mp_clear(&f); |
++ mp_clear(&g); |
++ return res; |
++} |
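++
++/*
++  Editorial note (not part of the original MPI sources): a small example
++  of the contract above.  For a = 3, p = 7 one valid output is c = 3 with
++  k = 1, since 3*3 = 9 == 2^1 (mod 7).  s_mp_fixup_reciprocal() below then
++  divides out the 2^k factor to obtain 3^-1 = 5 (mod 7), as 3*5 = 15 == 1.
++ */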
++ |
++/* Compute T = (P ** -1) mod MP_RADIX. Also works for 16-bit mp_digits. |
++** This technique is from the paper "Fast Modular Reciprocals" (unpublished)
++** by Richard Schroeppel (a.k.a. Captain Nemo). |
++*/ |
++mp_digit s_mp_invmod_radix(mp_digit P) |
++{ |
++ mp_digit T = P; |
++ T *= 2 - (P * T); |
++ T *= 2 - (P * T); |
++ T *= 2 - (P * T); |
++ T *= 2 - (P * T); |
++#if !defined(MP_USE_UINT_DIGIT) |
++ T *= 2 - (P * T); |
++ T *= 2 - (P * T); |
++#endif |
++ return T; |
++} |
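++
++/*
++  Editorial note (not part of the original MPI sources): why the repeated
++  "T *= 2 - (P * T)" steps work.  For odd P, P*P == 1 (mod 8), so T = P is
++  already an inverse of P modulo 2^3.  Each step replaces T by T*(2 - P*T);
++  if P*T == 1 (mod 2^n), then
++
++      P*T*(2 - P*T) = 1 - (P*T - 1)^2 == 1 (mod 2^(2n)),
++
++  so the number of correct low-order bits doubles: 3 -> 6 -> 12 -> 24 -> 48,
++  which covers a 32-bit mp_digit after four steps; the two additional steps
++  guarded by MP_USE_UINT_DIGIT extend this past 64 bits for wider digits.
++ */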
++ |
++/* Given c, k, and prime p, where a*c == 2**k (mod p), |
++** compute x = (a ** -1) mod p.  This is similar to Montgomery reduction.
++** This technique is from the paper "Fast Modular Reciprocals" (unpublished)
++** by Richard Schroeppel (a.k.a. Captain Nemo). |
++*/ |
++mp_err s_mp_fixup_reciprocal(const mp_int *c, const mp_int *p, int k, mp_int *x) |
++{ |
++ int k_orig = k; |
++ mp_digit r; |
++ mp_size ix; |
++ mp_err res; |
++ |
++ if (mp_cmp_z(c) < 0) { /* c < 0 */ |
++ MP_CHECKOK( mp_add(c, p, x) ); /* x = c + p */ |
++ } else { |
++ MP_CHECKOK( mp_copy(c, x) ); /* x = c */ |
++ } |
++ |
++ /* make sure x is large enough */ |
++ ix = MP_HOWMANY(k, MP_DIGIT_BIT) + MP_USED(p) + 1; |
++ ix = MP_MAX(ix, MP_USED(x)); |
++ MP_CHECKOK( s_mp_pad(x, ix) ); |
++ |
++ r = 0 - s_mp_invmod_radix(MP_DIGIT(p,0)); |
++ |
++ for (ix = 0; k > 0; ix++) { |
++ int j = MP_MIN(k, MP_DIGIT_BIT); |
++ mp_digit v = r * MP_DIGIT(x, ix); |
++ if (j < MP_DIGIT_BIT) { |
++ v &= ((mp_digit)1 << j) - 1; /* v = v mod (2 ** j) */ |
++ } |
++ s_mp_mul_d_add_offset(p, v, x, ix); /* x += p * v * (RADIX ** ix) */ |
++ k -= j; |
++ } |
++ s_mp_clamp(x); |
++ s_mp_div_2d(x, k_orig); |
++ res = MP_OKAY; |
++ |
++CLEANUP: |
++ return res; |
++} |
++ |
++/* compute mod inverse using Schroeppel's method, only if m is odd */ |
++mp_err s_mp_invmod_odd_m(const mp_int *a, const mp_int *m, mp_int *c) |
++{ |
++ int k; |
++ mp_err res; |
++ mp_int x; |
++ |
++ ARGCHK(a && m && c, MP_BADARG); |
++ |
++ if(mp_cmp_z(a) == 0 || mp_cmp_z(m) == 0) |
++ return MP_RANGE; |
++ if (mp_iseven(m)) |
++ return MP_UNDEF; |
++ |
++ MP_DIGITS(&x) = 0; |
++ |
++ if (a == c) { |
++ if ((res = mp_init_copy(&x, a)) != MP_OKAY) |
++ return res; |
++ if (a == m) |
++ m = &x; |
++ a = &x; |
++ } else if (m == c) { |
++ if ((res = mp_init_copy(&x, m)) != MP_OKAY) |
++ return res; |
++ m = &x; |
++ } else { |
++ MP_DIGITS(&x) = 0; |
++ } |
++ |
++ MP_CHECKOK( s_mp_almost_inverse(a, m, c) ); |
++ k = res; |
++ MP_CHECKOK( s_mp_fixup_reciprocal(c, m, k, c) ); |
++CLEANUP: |
++ mp_clear(&x); |
++ return res; |
++} |
++ |
++/* Known good algorithm for computing modular inverse. But slow. */ |
++mp_err mp_invmod_xgcd(const mp_int *a, const mp_int *m, mp_int *c) |
++{ |
++ mp_int g, x; |
++ mp_err res; |
++ |
++ ARGCHK(a && m && c, MP_BADARG); |
++ |
++ if(mp_cmp_z(a) == 0 || mp_cmp_z(m) == 0) |
++ return MP_RANGE; |
++ |
++ MP_DIGITS(&g) = 0; |
++ MP_DIGITS(&x) = 0; |
++ MP_CHECKOK( mp_init(&x) ); |
++ MP_CHECKOK( mp_init(&g) ); |
++ |
++ MP_CHECKOK( mp_xgcd(a, m, &g, &x, NULL) ); |
++ |
++ if (mp_cmp_d(&g, 1) != MP_EQ) { |
++ res = MP_UNDEF; |
++ goto CLEANUP; |
++ } |
++ |
++ res = mp_mod(&x, m, c); |
++ SIGN(c) = SIGN(a); |
++ |
++CLEANUP: |
++ mp_clear(&x); |
++ mp_clear(&g); |
++ |
++ return res; |
++} |
++ |
++/* modular inverse where modulus is 2**k. */ |
++/* c = a**-1 mod 2**k */ |
++mp_err s_mp_invmod_2d(const mp_int *a, mp_size k, mp_int *c) |
++{ |
++ mp_err res; |
++ mp_size ix = k + 4; |
++ mp_int t0, t1, val, tmp, two2k; |
++ |
++ static const mp_digit d2 = 2; |
++ static const mp_int two = { MP_ZPOS, 1, 1, (mp_digit *)&d2 }; |
++ |
++ if (mp_iseven(a)) |
++ return MP_UNDEF; |
++ if (k <= MP_DIGIT_BIT) { |
++ mp_digit i = s_mp_invmod_radix(MP_DIGIT(a,0)); |
++ if (k < MP_DIGIT_BIT) |
++ i &= ((mp_digit)1 << k) - (mp_digit)1; |
++ mp_set(c, i); |
++ return MP_OKAY; |
++ } |
++ MP_DIGITS(&t0) = 0; |
++ MP_DIGITS(&t1) = 0; |
++ MP_DIGITS(&val) = 0; |
++ MP_DIGITS(&tmp) = 0; |
++ MP_DIGITS(&two2k) = 0; |
++ MP_CHECKOK( mp_init_copy(&val, a) ); |
++ s_mp_mod_2d(&val, k); |
++ MP_CHECKOK( mp_init_copy(&t0, &val) ); |
++ MP_CHECKOK( mp_init_copy(&t1, &t0) ); |
++ MP_CHECKOK( mp_init(&tmp) ); |
++ MP_CHECKOK( mp_init(&two2k) ); |
++ MP_CHECKOK( s_mp_2expt(&two2k, k) ); |
++ do { |
++ MP_CHECKOK( mp_mul(&val, &t1, &tmp) ); |
++ MP_CHECKOK( mp_sub(&two, &tmp, &tmp) ); |
++ MP_CHECKOK( mp_mul(&t1, &tmp, &t1) ); |
++ s_mp_mod_2d(&t1, k); |
++ while (MP_SIGN(&t1) != MP_ZPOS) { |
++ MP_CHECKOK( mp_add(&t1, &two2k, &t1) ); |
++ } |
++ if (mp_cmp(&t1, &t0) == MP_EQ) |
++ break; |
++ MP_CHECKOK( mp_copy(&t1, &t0) ); |
++ } while (--ix > 0); |
++ if (!ix) { |
++ res = MP_UNDEF; |
++ } else { |
++ mp_exch(c, &t1); |
++ } |
++ |
++CLEANUP: |
++ mp_clear(&t0); |
++ mp_clear(&t1); |
++ mp_clear(&val); |
++ mp_clear(&tmp); |
++ mp_clear(&two2k); |
++ return res; |
++} |
++ |
++mp_err s_mp_invmod_even_m(const mp_int *a, const mp_int *m, mp_int *c) |
++{ |
++ mp_err res; |
++ mp_size k; |
++ mp_int oddFactor, evenFactor; /* factors of the modulus */ |
++ mp_int oddPart, evenPart; /* parts to combine via CRT. */ |
++ mp_int C2, tmp1, tmp2; |
++ |
++ /*static const mp_digit d1 = 1; */ |
++ /*static const mp_int one = { MP_ZPOS, 1, 1, (mp_digit *)&d1 }; */ |
++ |
++ if ((res = s_mp_ispow2(m)) >= 0) { |
++ k = res; |
++ return s_mp_invmod_2d(a, k, c); |
++ } |
++ MP_DIGITS(&oddFactor) = 0; |
++ MP_DIGITS(&evenFactor) = 0; |
++ MP_DIGITS(&oddPart) = 0; |
++ MP_DIGITS(&evenPart) = 0; |
++ MP_DIGITS(&C2) = 0; |
++ MP_DIGITS(&tmp1) = 0; |
++ MP_DIGITS(&tmp2) = 0; |
++ |
++ MP_CHECKOK( mp_init_copy(&oddFactor, m) ); /* oddFactor = m */ |
++ MP_CHECKOK( mp_init(&evenFactor) ); |
++ MP_CHECKOK( mp_init(&oddPart) ); |
++ MP_CHECKOK( mp_init(&evenPart) ); |
++ MP_CHECKOK( mp_init(&C2) ); |
++ MP_CHECKOK( mp_init(&tmp1) ); |
++ MP_CHECKOK( mp_init(&tmp2) ); |
++ |
++ k = mp_trailing_zeros(m); |
++ s_mp_div_2d(&oddFactor, k); |
++ MP_CHECKOK( s_mp_2expt(&evenFactor, k) ); |
++ |
++ /* compute a**-1 mod oddFactor. */ |
++ MP_CHECKOK( s_mp_invmod_odd_m(a, &oddFactor, &oddPart) ); |
++ /* compute a**-1 mod evenFactor, where evenFactor == 2**k. */ |
++ MP_CHECKOK( s_mp_invmod_2d( a, k, &evenPart) ); |
++ |
++  /* Use the Chinese Remainder Theorem to compute a**-1 mod m. */
++ /* let m1 = oddFactor, v1 = oddPart, |
++ * let m2 = evenFactor, v2 = evenPart. |
++ */ |
++ |
++ /* Compute C2 = m1**-1 mod m2. */ |
++ MP_CHECKOK( s_mp_invmod_2d(&oddFactor, k, &C2) ); |
++ |
++ /* compute u = (v2 - v1)*C2 mod m2 */ |
++ MP_CHECKOK( mp_sub(&evenPart, &oddPart, &tmp1) ); |
++ MP_CHECKOK( mp_mul(&tmp1, &C2, &tmp2) ); |
++ s_mp_mod_2d(&tmp2, k); |
++ while (MP_SIGN(&tmp2) != MP_ZPOS) { |
++ MP_CHECKOK( mp_add(&tmp2, &evenFactor, &tmp2) ); |
++ } |
++ |
++ /* compute answer = v1 + u*m1 */ |
++ MP_CHECKOK( mp_mul(&tmp2, &oddFactor, c) ); |
++ MP_CHECKOK( mp_add(&oddPart, c, c) ); |
++ /* not sure this is necessary, but it's low cost if not. */ |
++ MP_CHECKOK( mp_mod(c, m, c) ); |
++ |
++CLEANUP: |
++ mp_clear(&oddFactor); |
++ mp_clear(&evenFactor); |
++ mp_clear(&oddPart); |
++ mp_clear(&evenPart); |
++ mp_clear(&C2); |
++ mp_clear(&tmp1); |
++ mp_clear(&tmp2); |
++ return res; |
++} |
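++
++/*
++  Editorial note (not part of the original MPI sources): why the CRT
++  combination above is correct.  With m = m1*m2, m1 odd, m2 = 2^k,
++  v1 = a^-1 mod m1, v2 = a^-1 mod m2, C2 = m1^-1 mod m2 and
++  u = (v2 - v1)*C2 mod m2, the answer v1 + u*m1 is congruent to v1
++  (mod m1) and to v1 + (v2 - v1) = v2 (mod m2), hence to a^-1 modulo m.
++  Example: m = 40 = 5 * 8, a = 3: v1 = 2, v2 = 3, C2 = 5, u = 5, giving
++  2 + 5*5 = 27, and 3*27 = 81 == 1 (mod 40).
++ */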
++ |
++ |
++/* {{{ mp_invmod(a, m, c) */ |
++ |
++/* |
++ mp_invmod(a, m, c) |
++ |
++ Compute c = a^-1 (mod m), if there is an inverse for a (mod m). |
++ This is equivalent to the question of whether (a, m) = 1. If not, |
++ MP_UNDEF is returned, and there is no inverse. |
++ */ |
++ |
++mp_err mp_invmod(const mp_int *a, const mp_int *m, mp_int *c) |
++{ |
++ |
++ ARGCHK(a && m && c, MP_BADARG); |
++ |
++ if(mp_cmp_z(a) == 0 || mp_cmp_z(m) == 0) |
++ return MP_RANGE; |
++ |
++ if (mp_isodd(m)) { |
++ return s_mp_invmod_odd_m(a, m, c); |
++ } |
++ if (mp_iseven(a)) |
++    return MP_UNDEF; /* not invertible */
++ |
++ return s_mp_invmod_even_m(a, m, c); |
++ |
++} /* end mp_invmod() */ |
++ |
++/* }}} */ |
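++
++/*
++  Illustrative examples (editorial note, not part of the original MPI
++  sources): for a = 3, m = 40, mp_invmod() yields c = 27, since
++  3*27 = 81 == 1 (mod 40); for a = 2, m = 40 it returns MP_UNDEF,
++  because gcd(2, 40) = 2 and no inverse exists.
++ */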
++#endif /* if MP_NUMTH */ |
++ |
++/* }}} */ |
++ |
++/*------------------------------------------------------------------------*/ |
++/* {{{ mp_print(mp, ofp) */ |
++ |
++#if MP_IOFUNC |
++/* |
++ mp_print(mp, ofp) |
++ |
++ Print a textual representation of the given mp_int on the output |
++ stream 'ofp'. Output is generated using the internal radix. |
++ */ |
++ |
++void mp_print(mp_int *mp, FILE *ofp) |
++{ |
++ int ix; |
++ |
++ if(mp == NULL || ofp == NULL) |
++ return; |
++ |
++ fputc((SIGN(mp) == NEG) ? '-' : '+', ofp); |
++ |
++ for(ix = USED(mp) - 1; ix >= 0; ix--) { |
++ fprintf(ofp, DIGIT_FMT, DIGIT(mp, ix)); |
++ } |
++ |
++} /* end mp_print() */ |
++ |
++#endif /* if MP_IOFUNC */ |
++ |
++/* }}} */ |
++ |
++/*------------------------------------------------------------------------*/ |
++/* {{{ More I/O Functions */ |
++ |
++/* {{{ mp_read_raw(mp, str, len) */ |
++ |
++/* |
++ mp_read_raw(mp, str, len) |
++ |
++ Read in a raw value (base 256) into the given mp_int |
++ */ |
++ |
++mp_err mp_read_raw(mp_int *mp, char *str, int len) |
++{ |
++ int ix; |
++ mp_err res; |
++ unsigned char *ustr = (unsigned char *)str; |
++ |
++ ARGCHK(mp != NULL && str != NULL && len > 0, MP_BADARG); |
++ |
++ mp_zero(mp); |
++ |
++ /* Get sign from first byte */ |
++ if(ustr[0]) |
++ SIGN(mp) = NEG; |
++ else |
++ SIGN(mp) = ZPOS; |
++ |
++ /* Read the rest of the digits */ |
++ for(ix = 1; ix < len; ix++) { |
++ if((res = mp_mul_d(mp, 256, mp)) != MP_OKAY) |
++ return res; |
++ if((res = mp_add_d(mp, ustr[ix], mp)) != MP_OKAY) |
++ return res; |
++ } |
++ |
++ return MP_OKAY; |
++ |
++} /* end mp_read_raw() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_raw_size(mp) */ |
++ |
++int mp_raw_size(mp_int *mp) |
++{ |
++ ARGCHK(mp != NULL, 0); |
++ |
++ return (USED(mp) * sizeof(mp_digit)) + 1; |
++ |
++} /* end mp_raw_size() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_toraw(mp, str) */ |
++ |
++mp_err mp_toraw(mp_int *mp, char *str) |
++{ |
++ int ix, jx, pos = 1; |
++ |
++ ARGCHK(mp != NULL && str != NULL, MP_BADARG); |
++ |
++ str[0] = (char)SIGN(mp); |
++ |
++ /* Iterate over each digit... */ |
++ for(ix = USED(mp) - 1; ix >= 0; ix--) { |
++ mp_digit d = DIGIT(mp, ix); |
++ |
++ /* Unpack digit bytes, high order first */ |
++ for(jx = sizeof(mp_digit) - 1; jx >= 0; jx--) { |
++ str[pos++] = (char)(d >> (jx * CHAR_BIT)); |
++ } |
++ } |
++ |
++ return MP_OKAY; |
++ |
++} /* end mp_toraw() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_read_radix(mp, str, radix) */ |
++ |
++/* |
++ mp_read_radix(mp, str, radix) |
++ |
++ Read an integer from the given string, and set mp to the resulting |
++  value.  The input is interpreted in the given radix.  Leading non-digit
++ characters are ignored, and the function reads until a non-digit |
++ character or the end of the string. |
++ */ |
++ |
++mp_err mp_read_radix(mp_int *mp, const char *str, int radix) |
++{ |
++ int ix = 0, val = 0; |
++ mp_err res; |
++ mp_sign sig = ZPOS; |
++ |
++ ARGCHK(mp != NULL && str != NULL && radix >= 2 && radix <= MAX_RADIX, |
++ MP_BADARG); |
++ |
++ mp_zero(mp); |
++ |
++ /* Skip leading non-digit characters until a digit or '-' or '+' */ |
++ while(str[ix] && |
++ (s_mp_tovalue(str[ix], radix) < 0) && |
++ str[ix] != '-' && |
++ str[ix] != '+') { |
++ ++ix; |
++ } |
++ |
++ if(str[ix] == '-') { |
++ sig = NEG; |
++ ++ix; |
++ } else if(str[ix] == '+') { |
++ sig = ZPOS; /* this is the default anyway... */ |
++ ++ix; |
++ } |
++ |
++ while((val = s_mp_tovalue(str[ix], radix)) >= 0) { |
++ if((res = s_mp_mul_d(mp, radix)) != MP_OKAY) |
++ return res; |
++ if((res = s_mp_add_d(mp, val)) != MP_OKAY) |
++ return res; |
++ ++ix; |
++ } |
++ |
++ if(s_mp_cmp_d(mp, 0) == MP_EQ) |
++ SIGN(mp) = ZPOS; |
++ else |
++ SIGN(mp) = sig; |
++ |
++ return MP_OKAY; |
++ |
++} /* end mp_read_radix() */ |
++ |
++mp_err mp_read_variable_radix(mp_int *a, const char * str, int default_radix) |
++{ |
++ int radix = default_radix; |
++ int cx; |
++ mp_sign sig = ZPOS; |
++ mp_err res; |
++ |
++ /* Skip leading non-digit characters until a digit or '-' or '+' */ |
++ while ((cx = *str) != 0 && |
++ (s_mp_tovalue(cx, radix) < 0) && |
++ cx != '-' && |
++ cx != '+') { |
++ ++str; |
++ } |
++ |
++ if (cx == '-') { |
++ sig = NEG; |
++ ++str; |
++ } else if (cx == '+') { |
++ sig = ZPOS; /* this is the default anyway... */ |
++ ++str; |
++ } |
++ |
++ if (str[0] == '0') { |
++ if ((str[1] | 0x20) == 'x') { |
++ radix = 16; |
++ str += 2; |
++ } else { |
++ radix = 8; |
++ str++; |
++ } |
++ } |
++ res = mp_read_radix(a, str, radix); |
++ if (res == MP_OKAY) { |
++ MP_SIGN(a) = (s_mp_cmp_d(a, 0) == MP_EQ) ? ZPOS : sig; |
++ } |
++ return res; |
++} |
++ |
++/* }}} */ |
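++
++/*
++  Illustrative examples (editorial note, not part of the original MPI
++  sources; the variable a is hypothetical): mp_read_radix(&a, "FF", 16)
++  sets a = 255, and mp_read_radix(&a, "-101", 2) sets a = -5.
++  mp_read_variable_radix() additionally inspects the prefix: "0x1A" is
++  read as hex (26) and "017" as octal (15); anything else is read in
++  default_radix.
++ */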
++ |
++/* {{{ mp_radix_size(mp, radix) */ |
++ |
++int mp_radix_size(mp_int *mp, int radix) |
++{ |
++ int bits; |
++ |
++ if(!mp || radix < 2 || radix > MAX_RADIX) |
++ return 0; |
++ |
++ bits = USED(mp) * DIGIT_BIT - 1; |
++ |
++ return s_mp_outlen(bits, radix); |
++ |
++} /* end mp_radix_size() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_toradix(mp, str, radix) */ |
++ |
++mp_err mp_toradix(mp_int *mp, char *str, int radix) |
++{ |
++ int ix, pos = 0; |
++ |
++ ARGCHK(mp != NULL && str != NULL, MP_BADARG); |
++ ARGCHK(radix > 1 && radix <= MAX_RADIX, MP_RANGE); |
++ |
++ if(mp_cmp_z(mp) == MP_EQ) { |
++ str[0] = '0'; |
++ str[1] = '\0'; |
++ } else { |
++ mp_err res; |
++ mp_int tmp; |
++ mp_sign sgn; |
++ mp_digit rem, rdx = (mp_digit)radix; |
++ char ch; |
++ |
++ if((res = mp_init_copy(&tmp, mp)) != MP_OKAY) |
++ return res; |
++ |
++ /* Save sign for later, and take absolute value */ |
++ sgn = SIGN(&tmp); SIGN(&tmp) = ZPOS; |
++ |
++ /* Generate output digits in reverse order */ |
++ while(mp_cmp_z(&tmp) != 0) { |
++ if((res = mp_div_d(&tmp, rdx, &tmp, &rem)) != MP_OKAY) { |
++ mp_clear(&tmp); |
++ return res; |
++ } |
++ |
++ /* Generate digits, use capital letters */ |
++ ch = s_mp_todigit(rem, radix, 0); |
++ |
++ str[pos++] = ch; |
++ } |
++ |
++ /* Add - sign if original value was negative */ |
++ if(sgn == NEG) |
++ str[pos++] = '-'; |
++ |
++ /* Add trailing NUL to end the string */ |
++ str[pos--] = '\0'; |
++ |
++ /* Reverse the digits and sign indicator */ |
++ ix = 0; |
++ while(ix < pos) { |
++ char tmp = str[ix]; |
++ |
++ str[ix] = str[pos]; |
++ str[pos] = tmp; |
++ ++ix; |
++ --pos; |
++ } |
++ |
++ mp_clear(&tmp); |
++ } |
++ |
++ return MP_OKAY; |
++ |
++} /* end mp_toradix() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_tovalue(ch, r) */ |
++ |
++int mp_tovalue(char ch, int r) |
++{ |
++ return s_mp_tovalue(ch, r); |
++ |
++} /* end mp_tovalue() */ |
++ |
++/* }}} */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_strerror(ec) */ |
++ |
++/* |
++ mp_strerror(ec) |
++ |
++ Return a string describing the meaning of error code 'ec'. The |
++ string returned is allocated in static memory, so the caller should |
++ not attempt to modify or free the memory associated with this |
++ string. |
++ */ |
++const char *mp_strerror(mp_err ec) |
++{ |
++ int aec = (ec < 0) ? -ec : ec; |
++ |
++ /* Code values are negative, so the senses of these comparisons |
++ are accurate */ |
++ if(ec < MP_LAST_CODE || ec > MP_OKAY) { |
++ return mp_err_string[0]; /* unknown error code */ |
++ } else { |
++ return mp_err_string[aec + 1]; |
++ } |
++ |
++} /* end mp_strerror() */ |
++ |
++/* }}} */ |
++ |
++/*========================================================================*/ |
++/*------------------------------------------------------------------------*/ |
++/* Static function definitions (internal use only) */ |
++ |
++/* {{{ Memory management */ |
++ |
++/* {{{ s_mp_grow(mp, min) */ |
++ |
++/* Make sure there are at least 'min' digits allocated to mp */ |
++mp_err s_mp_grow(mp_int *mp, mp_size min) |
++{ |
++ if(min > ALLOC(mp)) { |
++ mp_digit *tmp; |
++ |
++ /* Set min to next nearest default precision block size */ |
++ min = MP_ROUNDUP(min, s_mp_defprec); |
++ |
++ if((tmp = s_mp_alloc(min, sizeof(mp_digit))) == NULL) |
++ return MP_MEM; |
++ |
++ s_mp_copy(DIGITS(mp), tmp, USED(mp)); |
++ |
++#if MP_CRYPTO |
++ s_mp_setz(DIGITS(mp), ALLOC(mp)); |
++#endif |
++ s_mp_free(DIGITS(mp)); |
++ DIGITS(mp) = tmp; |
++ ALLOC(mp) = min; |
++ } |
++ |
++ return MP_OKAY; |
++ |
++} /* end s_mp_grow() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_pad(mp, min) */ |
++ |
++/* Make sure the used size of mp is at least 'min', growing if needed */ |
++mp_err s_mp_pad(mp_int *mp, mp_size min) |
++{ |
++ if(min > USED(mp)) { |
++ mp_err res; |
++ |
++ /* Make sure there is room to increase precision */ |
++ if (min > ALLOC(mp)) { |
++ if ((res = s_mp_grow(mp, min)) != MP_OKAY) |
++ return res; |
++ } else { |
++ s_mp_setz(DIGITS(mp) + USED(mp), min - USED(mp)); |
++ } |
++ |
++ /* Increase precision; should already be 0-filled */ |
++ USED(mp) = min; |
++ } |
++ |
++ return MP_OKAY; |
++ |
++} /* end s_mp_pad() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_setz(dp, count) */ |
++ |
++#if MP_MACRO == 0 |
++/* Set 'count' digits pointed to by dp to be zeroes */ |
++void s_mp_setz(mp_digit *dp, mp_size count) |
++{ |
++#if MP_MEMSET == 0 |
++ int ix; |
++ |
++ for(ix = 0; ix < count; ix++) |
++ dp[ix] = 0; |
++#else |
++ memset(dp, 0, count * sizeof(mp_digit)); |
++#endif |
++ |
++} /* end s_mp_setz() */ |
++#endif |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_copy(sp, dp, count) */ |
++ |
++#if MP_MACRO == 0 |
++/* Copy 'count' digits from sp to dp */ |
++void s_mp_copy(const mp_digit *sp, mp_digit *dp, mp_size count) |
++{ |
++#if MP_MEMCPY == 0 |
++ int ix; |
++ |
++ for(ix = 0; ix < count; ix++) |
++ dp[ix] = sp[ix]; |
++#else |
++ memcpy(dp, sp, count * sizeof(mp_digit)); |
++#endif |
++ |
++} /* end s_mp_copy() */ |
++#endif |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_alloc(nb, ni) */ |
++ |
++#if MP_MACRO == 0 |
++/* Allocate nb records of ni bytes each, and return a pointer to that */
++void *s_mp_alloc(size_t nb, size_t ni) |
++{ |
++ ++mp_allocs; |
++ return calloc(nb, ni); |
++ |
++} /* end s_mp_alloc() */ |
++#endif |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_free(ptr) */ |
++ |
++#if MP_MACRO == 0 |
++/* Free the memory pointed to by ptr */ |
++void s_mp_free(void *ptr) |
++{ |
++ if(ptr) { |
++ ++mp_frees; |
++ free(ptr); |
++ } |
++} /* end s_mp_free() */ |
++#endif |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_clamp(mp) */ |
++ |
++#if MP_MACRO == 0 |
++/* Remove leading zeroes from the given value */ |
++void s_mp_clamp(mp_int *mp) |
++{ |
++ mp_size used = MP_USED(mp); |
++ while (used > 1 && DIGIT(mp, used - 1) == 0) |
++ --used; |
++ MP_USED(mp) = used; |
++} /* end s_mp_clamp() */ |
++#endif |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_exch(a, b) */ |
++ |
++/* Exchange the data for a and b; (b, a) = (a, b) */ |
++void s_mp_exch(mp_int *a, mp_int *b) |
++{ |
++ mp_int tmp; |
++ |
++ tmp = *a; |
++ *a = *b; |
++ *b = tmp; |
++ |
++} /* end s_mp_exch() */ |
++ |
++/* }}} */ |
++ |
++/* }}} */ |
++ |
++/* {{{ Arithmetic helpers */ |
++ |
++/* {{{ s_mp_lshd(mp, p) */ |
++ |
++/* |
++ Shift mp leftward by p digits, growing if needed, and zero-filling |
++ the in-shifted digits at the right end. This is a convenient |
++ alternative to multiplication by powers of the radix |
++ The value of USED(mp) must already have been set to the value for |
++ the shifted result. |
++ */ |
++ |
++mp_err s_mp_lshd(mp_int *mp, mp_size p) |
++{ |
++ mp_err res; |
++ mp_size pos; |
++ int ix; |
++ |
++ if(p == 0) |
++ return MP_OKAY; |
++ |
++ if (MP_USED(mp) == 1 && MP_DIGIT(mp, 0) == 0) |
++ return MP_OKAY; |
++ |
++ if((res = s_mp_pad(mp, USED(mp) + p)) != MP_OKAY) |
++ return res; |
++ |
++ pos = USED(mp) - 1; |
++ |
++ /* Shift all the significant figures over as needed */ |
++ for(ix = pos - p; ix >= 0; ix--) |
++ DIGIT(mp, ix + p) = DIGIT(mp, ix); |
++ |
++ /* Fill the bottom digits with zeroes */ |
++ for(ix = 0; ix < p; ix++) |
++ DIGIT(mp, ix) = 0; |
++ |
++ return MP_OKAY; |
++ |
++} /* end s_mp_lshd() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_mul_2d(mp, d) */ |
++ |
++/* |
++ Multiply the integer by 2^d, where d is a number of bits. This |
++ amounts to a bitwise shift of the value. |
++ */ |
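++/*
++  Illustration (assuming 32-bit digits): d = 70 splits into
++  dshift = 70 / 32 = 2 whole digits and bshift = 70 % 32 = 6 bits.
++  The whole-digit part is handled by s_mp_lshd() and the remaining
++  6-bit shift by the carry loop in the function body; 'mask' holds the
++  bits that would spill out of the current top digit, so an extra
++  digit is reserved only when it is nonzero.
++ */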
++mp_err s_mp_mul_2d(mp_int *mp, mp_digit d) |
++{ |
++ mp_err res; |
++ mp_digit dshift, bshift; |
++ mp_digit mask; |
++ |
++ ARGCHK(mp != NULL, MP_BADARG); |
++ |
++ dshift = d / MP_DIGIT_BIT; |
++ bshift = d % MP_DIGIT_BIT; |
++ /* bits to be shifted out of the top word */ |
++ mask = ((mp_digit)~0 << (MP_DIGIT_BIT - bshift)); |
++ mask &= MP_DIGIT(mp, MP_USED(mp) - 1); |
++ |
++ if (MP_OKAY != (res = s_mp_pad(mp, MP_USED(mp) + dshift + (mask != 0) ))) |
++ return res; |
++ |
++ if (dshift && MP_OKAY != (res = s_mp_lshd(mp, dshift))) |
++ return res; |
++ |
++ if (bshift) { |
++ mp_digit *pa = MP_DIGITS(mp); |
++ mp_digit *alim = pa + MP_USED(mp); |
++ mp_digit prev = 0; |
++ |
++ for (pa += dshift; pa < alim; ) { |
++ mp_digit x = *pa; |
++ *pa++ = (x << bshift) | prev; |
++ prev = x >> (DIGIT_BIT - bshift); |
++ } |
++ } |
++ |
++ s_mp_clamp(mp); |
++ return MP_OKAY; |
++} /* end s_mp_mul_2d() */ |
++ |
++/* {{{ s_mp_rshd(mp, p) */ |
++ |
++/* |
++ Shift mp rightward by p digits. Maintains the invariant that |
++ digits above the precision are all zero. Digits shifted off the |
++ end are lost. Cannot fail. |
++ */ |
++ |
++void s_mp_rshd(mp_int *mp, mp_size p) |
++{ |
++ mp_size ix; |
++ mp_digit *src, *dst; |
++ |
++ if(p == 0) |
++ return; |
++ |
++ /* Shortcut when all digits are to be shifted off */ |
++ if(p >= USED(mp)) { |
++ s_mp_setz(DIGITS(mp), ALLOC(mp)); |
++ USED(mp) = 1; |
++ SIGN(mp) = ZPOS; |
++ return; |
++ } |
++ |
++ /* Shift all the significant figures over as needed */ |
++ dst = MP_DIGITS(mp); |
++ src = dst + p; |
++ for (ix = USED(mp) - p; ix > 0; ix--) |
++ *dst++ = *src++; |
++ |
++ MP_USED(mp) -= p; |
++ /* Fill the top digits with zeroes */ |
++ while (p-- > 0) |
++ *dst++ = 0; |
++ |
++#if 0 |
++ /* Strip off any leading zeroes */ |
++ s_mp_clamp(mp); |
++#endif |
++ |
++} /* end s_mp_rshd() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_div_2(mp) */ |
++ |
++/* Divide by two -- take advantage of radix properties to do it fast */ |
++void s_mp_div_2(mp_int *mp) |
++{ |
++ s_mp_div_2d(mp, 1); |
++ |
++} /* end s_mp_div_2() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_mul_2(mp) */ |
++ |
++mp_err s_mp_mul_2(mp_int *mp) |
++{ |
++ mp_digit *pd; |
++ int ix, used; |
++ mp_digit kin = 0; |
++ |
++ /* Shift digits leftward by 1 bit */ |
++ used = MP_USED(mp); |
++ pd = MP_DIGITS(mp); |
++ for (ix = 0; ix < used; ix++) { |
++ mp_digit d = *pd; |
++ *pd++ = (d << 1) | kin; |
++ kin = (d >> (DIGIT_BIT - 1)); |
++ } |
++ |
++ /* Deal with rollover from last digit */ |
++ if (kin) { |
++ if (ix >= ALLOC(mp)) { |
++ mp_err res; |
++ if((res = s_mp_grow(mp, ALLOC(mp) + 1)) != MP_OKAY) |
++ return res; |
++ } |
++ |
++ DIGIT(mp, ix) = kin; |
++ USED(mp) += 1; |
++ } |
++ |
++ return MP_OKAY; |
++ |
++} /* end s_mp_mul_2() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_mod_2d(mp, d) */ |
++ |
++/* |
++ Remainder the integer by 2^d, where d is a number of bits. This |
++ amounts to a bitwise AND of the value, and does not require the full |
++ division code |
++ */ |
++void s_mp_mod_2d(mp_int *mp, mp_digit d) |
++{ |
++ mp_size ndig = (d / DIGIT_BIT), nbit = (d % DIGIT_BIT); |
++ mp_size ix; |
++ mp_digit dmask; |
++ |
++ if(ndig >= USED(mp)) |
++ return; |
++ |
++ /* Flush all the bits above 2^d in its digit */ |
++ dmask = ((mp_digit)1 << nbit) - 1; |
++ DIGIT(mp, ndig) &= dmask; |
++ |
++ /* Flush all digits above the one with 2^d in it */ |
++ for(ix = ndig + 1; ix < USED(mp); ix++) |
++ DIGIT(mp, ix) = 0; |
++ |
++ s_mp_clamp(mp); |
++ |
++} /* end s_mp_mod_2d() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_div_2d(mp, d) */ |
++ |
++/* |
++ Divide the integer by 2^d, where d is a number of bits. This |
++ amounts to a bitwise shift of the value, and does not require the |
++ full division code (used in Barrett reduction, see below) |
++ */ |
++void s_mp_div_2d(mp_int *mp, mp_digit d) |
++{ |
++ int ix; |
++ mp_digit save, next, mask; |
++ |
++ s_mp_rshd(mp, d / DIGIT_BIT); |
++ d %= DIGIT_BIT; |
++ if (d) { |
++ mask = ((mp_digit)1 << d) - 1; |
++ save = 0; |
++ for(ix = USED(mp) - 1; ix >= 0; ix--) { |
++ next = DIGIT(mp, ix) & mask; |
++ DIGIT(mp, ix) = (DIGIT(mp, ix) >> d) | (save << (DIGIT_BIT - d)); |
++ save = next; |
++ } |
++ } |
++ s_mp_clamp(mp); |
++ |
++} /* end s_mp_div_2d() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_norm(a, b, *d) */ |
++ |
++/* |
++ s_mp_norm(a, b, *d) |
++ |
++ Normalize a and b for division, where b is the divisor. In order |
++ that we might make good guesses for quotient digits, we want the |
++ leading digit of b to be at least half the radix, which we |
++ accomplish by multiplying a and b by a power of 2. The exponent |
++ (shift count) is placed in *pd, so that the remainder can be shifted |
++ back at the end of the division process. |
++ */ |
++ |
++mp_err s_mp_norm(mp_int *a, mp_int *b, mp_digit *pd) |
++{ |
++ mp_digit d; |
++ mp_digit mask; |
++ mp_digit b_msd; |
++ mp_err res = MP_OKAY; |
++ |
++ d = 0; |
++ mask = DIGIT_MAX & ~(DIGIT_MAX >> 1); /* mask is msb of digit */ |
++ b_msd = DIGIT(b, USED(b) - 1); |
++ while (!(b_msd & mask)) { |
++ b_msd <<= 1; |
++ ++d; |
++ } |
++ |
++ if (d) { |
++ MP_CHECKOK( s_mp_mul_2d(a, d) ); |
++ MP_CHECKOK( s_mp_mul_2d(b, d) ); |
++ } |
++ |
++ *pd = d; |
++CLEANUP: |
++ return res; |
++ |
++} /* end s_mp_norm() */ |
++ |
++/* }}} */ |
++ |
++/* }}} */ |
++ |
++/* {{{ Primitive digit arithmetic */ |
++ |
++/* {{{ s_mp_add_d(mp, d) */ |
++ |
++/* Add d to |mp| in place */ |
++mp_err s_mp_add_d(mp_int *mp, mp_digit d) /* unsigned digit addition */ |
++{ |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_ADD_WORD) |
++ mp_word w, k = 0; |
++ mp_size ix = 1; |
++ |
++ w = (mp_word)DIGIT(mp, 0) + d; |
++ DIGIT(mp, 0) = ACCUM(w); |
++ k = CARRYOUT(w); |
++ |
++ while(ix < USED(mp) && k) { |
++ w = (mp_word)DIGIT(mp, ix) + k; |
++ DIGIT(mp, ix) = ACCUM(w); |
++ k = CARRYOUT(w); |
++ ++ix; |
++ } |
++ |
++ if(k != 0) { |
++ mp_err res; |
++ |
++ if((res = s_mp_pad(mp, USED(mp) + 1)) != MP_OKAY) |
++ return res; |
++ |
++ DIGIT(mp, ix) = (mp_digit)k; |
++ } |
++ |
++ return MP_OKAY; |
++#else |
++ mp_digit * pmp = MP_DIGITS(mp); |
++ mp_digit sum, mp_i, carry = 0; |
++ mp_err res = MP_OKAY; |
++ int used = (int)MP_USED(mp); |
++ |
++ mp_i = *pmp; |
++ *pmp++ = sum = d + mp_i; |
++ carry = (sum < d); |
++ while (carry && --used > 0) { |
++ mp_i = *pmp; |
++ *pmp++ = sum = carry + mp_i; |
++ carry = !sum; |
++ } |
++ if (carry && !used) { |
++ /* mp is growing */ |
++ used = MP_USED(mp); |
++ MP_CHECKOK( s_mp_pad(mp, used + 1) ); |
++ MP_DIGIT(mp, used) = carry; |
++ } |
++CLEANUP: |
++ return res; |
++#endif |
++} /* end s_mp_add_d() */ |
++ |
++/* }}} */ |
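++
++/*
++  A note on the carry idiom used in the non-mp_word branches above and
++  below: mp_digit arithmetic wraps modulo 2^MP_DIGIT_BIT, so after
++  sum = a + b the test (sum < a) is exactly the carry-out bit, and after
++  diff = a - b the test (diff > a) is exactly the borrow.
++ */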
++ |
++/* {{{ s_mp_sub_d(mp, d) */ |
++ |
++/* Subtract d from |mp| in place, assumes |mp| > d */ |
++mp_err s_mp_sub_d(mp_int *mp, mp_digit d) /* unsigned digit subtract */ |
++{ |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_SUB_WORD) |
++ mp_word w, b = 0; |
++ mp_size ix = 1; |
++ |
++ /* Compute initial subtraction */ |
++ w = (RADIX + (mp_word)DIGIT(mp, 0)) - d; |
++ b = CARRYOUT(w) ? 0 : 1; |
++ DIGIT(mp, 0) = ACCUM(w); |
++ |
++ /* Propagate borrows leftward */ |
++ while(b && ix < USED(mp)) { |
++ w = (RADIX + (mp_word)DIGIT(mp, ix)) - b; |
++ b = CARRYOUT(w) ? 0 : 1; |
++ DIGIT(mp, ix) = ACCUM(w); |
++ ++ix; |
++ } |
++ |
++ /* Remove leading zeroes */ |
++ s_mp_clamp(mp); |
++ |
++ /* If we have a borrow out, it's a violation of the input invariant */ |
++ if(b) |
++ return MP_RANGE; |
++ else |
++ return MP_OKAY; |
++#else |
++ mp_digit *pmp = MP_DIGITS(mp); |
++ mp_digit mp_i, diff, borrow; |
++ mp_size used = MP_USED(mp); |
++ |
++ mp_i = *pmp; |
++ *pmp++ = diff = mp_i - d; |
++ borrow = (diff > mp_i); |
++ while (borrow && --used) { |
++ mp_i = *pmp; |
++ *pmp++ = diff = mp_i - borrow; |
++ borrow = (diff > mp_i); |
++ } |
++ s_mp_clamp(mp); |
++ return (borrow && !used) ? MP_RANGE : MP_OKAY; |
++#endif |
++} /* end s_mp_sub_d() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_mul_d(a, d) */ |
++ |
++/* Compute a = a * d, single digit multiplication */ |
++mp_err s_mp_mul_d(mp_int *a, mp_digit d) |
++{ |
++ mp_err res; |
++ mp_size used; |
++ int pow; |
++ |
++ if (!d) { |
++ mp_zero(a); |
++ return MP_OKAY; |
++ } |
++ if (d == 1) |
++ return MP_OKAY; |
++ if (0 <= (pow = s_mp_ispow2d(d))) { |
++ return s_mp_mul_2d(a, (mp_digit)pow); |
++ } |
++ |
++ used = MP_USED(a); |
++ MP_CHECKOK( s_mp_pad(a, used + 1) ); |
++ |
++ s_mpv_mul_d(MP_DIGITS(a), used, d, MP_DIGITS(a)); |
++ |
++ s_mp_clamp(a); |
++ |
++CLEANUP: |
++ return res; |
++ |
++} /* end s_mp_mul_d() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_div_d(mp, d, r) */ |
++ |
++/* |
++ s_mp_div_d(mp, d, r) |
++ |
++ Compute the quotient mp = mp / d and remainder r = mp mod d, for a |
++ single digit d. If r is null, the remainder will be discarded. |
++ */ |
++ |
++mp_err s_mp_div_d(mp_int *mp, mp_digit d, mp_digit *r) |
++{ |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_DIV_WORD) |
++ mp_word w = 0, q; |
++#else |
++ mp_digit w, q; |
++#endif |
++ int ix; |
++ mp_err res; |
++ mp_int quot; |
++ mp_int rem; |
++ |
++ if(d == 0) |
++ return MP_RANGE; |
++ if (d == 1) { |
++ if (r) |
++ *r = 0; |
++ return MP_OKAY; |
++ } |
++ /* could check for power of 2 here, but mp_div_d does that. */ |
++ if (MP_USED(mp) == 1) { |
++ mp_digit n = MP_DIGIT(mp,0); |
++ mp_digit rem; |
++ |
++ q = n / d; |
++ rem = n % d; |
++ MP_DIGIT(mp,0) = q; |
++ if (r) |
++ *r = rem; |
++ return MP_OKAY; |
++ } |
++ |
++ MP_DIGITS(&rem) = 0; |
++  MP_DIGITS(&quot) = 0;
++ /* Make room for the quotient */ |
++  MP_CHECKOK( mp_init_size(&quot, USED(mp)) );
++ |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_DIV_WORD) |
++ for(ix = USED(mp) - 1; ix >= 0; ix--) { |
++ w = (w << DIGIT_BIT) | DIGIT(mp, ix); |
++ |
++ if(w >= d) { |
++ q = w / d; |
++ w = w % d; |
++ } else { |
++ q = 0; |
++ } |
++ |
++    s_mp_lshd(&quot, 1);
++    DIGIT(&quot, 0) = (mp_digit)q;
++ } |
++#else |
++ { |
++ mp_digit p; |
++#if !defined(MP_ASSEMBLY_DIV_2DX1D) |
++ mp_digit norm; |
++#endif |
++ |
++ MP_CHECKOK( mp_init_copy(&rem, mp) ); |
++ |
++#if !defined(MP_ASSEMBLY_DIV_2DX1D) |
++    MP_DIGIT(&quot, 0) = d;
++    MP_CHECKOK( s_mp_norm(&rem, &quot, &norm) );
++    if (norm)
++      d <<= norm;
++    MP_DIGIT(&quot, 0) = 0;
++#endif |
++ |
++ p = 0; |
++ for (ix = USED(&rem) - 1; ix >= 0; ix--) { |
++ w = DIGIT(&rem, ix); |
++ |
++ if (p) { |
++ MP_CHECKOK( s_mpv_div_2dx1d(p, w, d, &q, &w) ); |
++ } else if (w >= d) { |
++ q = w / d; |
++ w = w % d; |
++ } else { |
++ q = 0; |
++ } |
++ |
++      MP_CHECKOK( s_mp_lshd(&quot, 1) );
++      DIGIT(&quot, 0) = q;
++ p = w; |
++ } |
++#if !defined(MP_ASSEMBLY_DIV_2DX1D) |
++ if (norm) |
++ w >>= norm; |
++#endif |
++ } |
++#endif |
++ |
++ /* Deliver the remainder, if desired */ |
++ if(r) |
++ *r = (mp_digit)w; |
++ |
++  s_mp_clamp(&quot);
++  mp_exch(&quot, mp);
++CLEANUP:
++  mp_clear(&quot);
++ mp_clear(&rem); |
++ |
++ return res; |
++} /* end s_mp_div_d() */ |
++ |
++/* }}} */ |
++ |
++ |
++/* }}} */ |
++ |
++/* {{{ Primitive full arithmetic */ |
++ |
++/* {{{ s_mp_add(a, b) */ |
++ |
++/* Compute a = |a| + |b| */ |
++mp_err s_mp_add(mp_int *a, const mp_int *b) /* magnitude addition */ |
++{ |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_ADD_WORD) |
++ mp_word w = 0; |
++#else |
++ mp_digit d, sum, carry = 0; |
++#endif |
++ mp_digit *pa, *pb; |
++ mp_size ix; |
++ mp_size used; |
++ mp_err res; |
++ |
++ /* Make sure a has enough precision for the output value */ |
++ if((USED(b) > USED(a)) && (res = s_mp_pad(a, USED(b))) != MP_OKAY) |
++ return res; |
++ |
++ /* |
++ Add up all digits up to the precision of b. If b had initially |
++ the same precision as a, or greater, we took care of it by the |
++ padding step above, so there is no problem. If b had initially |
++ less precision, we'll have to make sure the carry out is duly |
++ propagated upward among the higher-order digits of the sum. |
++ */ |
++ pa = MP_DIGITS(a); |
++ pb = MP_DIGITS(b); |
++ used = MP_USED(b); |
++ for(ix = 0; ix < used; ix++) { |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_ADD_WORD) |
++ w = w + *pa + *pb++; |
++ *pa++ = ACCUM(w); |
++ w = CARRYOUT(w); |
++#else |
++ d = *pa; |
++ sum = d + *pb++; |
++ d = (sum < d); /* detect overflow */ |
++ *pa++ = sum += carry; |
++ carry = d + (sum < carry); /* detect overflow */ |
++#endif |
++ } |
++ |
++ /* If we run out of 'b' digits before we're actually done, make |
++ sure the carries get propagated upward... |
++ */ |
++ used = MP_USED(a); |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_ADD_WORD) |
++ while (w && ix < used) { |
++ w = w + *pa; |
++ *pa++ = ACCUM(w); |
++ w = CARRYOUT(w); |
++ ++ix; |
++ } |
++#else |
++ while (carry && ix < used) { |
++ sum = carry + *pa; |
++ *pa++ = sum; |
++ carry = !sum; |
++ ++ix; |
++ } |
++#endif |
++ |
++ /* If there's an overall carry out, increase precision and include |
++ it. We could have done this initially, but why touch the memory |
++ allocator unless we're sure we have to? |
++ */ |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_ADD_WORD) |
++ if (w) { |
++ if((res = s_mp_pad(a, used + 1)) != MP_OKAY) |
++ return res; |
++ |
++ DIGIT(a, ix) = (mp_digit)w; |
++ } |
++#else |
++ if (carry) { |
++ if((res = s_mp_pad(a, used + 1)) != MP_OKAY) |
++ return res; |
++ |
++ DIGIT(a, used) = carry; |
++ } |
++#endif |
++ |
++ return MP_OKAY; |
++} /* end s_mp_add() */ |
++ |
++/* }}} */ |
++ |
++/* Compute c = |a| + |b| */ /* magnitude addition */ |
++mp_err s_mp_add_3arg(const mp_int *a, const mp_int *b, mp_int *c) |
++{ |
++ mp_digit *pa, *pb, *pc; |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_ADD_WORD) |
++ mp_word w = 0; |
++#else |
++ mp_digit sum, carry = 0, d; |
++#endif |
++ mp_size ix; |
++ mp_size used; |
++ mp_err res; |
++ |
++ MP_SIGN(c) = MP_SIGN(a); |
++ if (MP_USED(a) < MP_USED(b)) { |
++ const mp_int *xch = a; |
++ a = b; |
++ b = xch; |
++ } |
++ |
++ /* Make sure a has enough precision for the output value */ |
++ if (MP_OKAY != (res = s_mp_pad(c, MP_USED(a)))) |
++ return res; |
++ |
++ /* |
++ Add up all digits up to the precision of b. If b had initially |
++ the same precision as a, or greater, we took care of it by the |
++ exchange step above, so there is no problem. If b had initially |
++ less precision, we'll have to make sure the carry out is duly |
++ propagated upward among the higher-order digits of the sum. |
++ */ |
++ pa = MP_DIGITS(a); |
++ pb = MP_DIGITS(b); |
++ pc = MP_DIGITS(c); |
++ used = MP_USED(b); |
++ for (ix = 0; ix < used; ix++) { |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_ADD_WORD) |
++ w = w + *pa++ + *pb++; |
++ *pc++ = ACCUM(w); |
++ w = CARRYOUT(w); |
++#else |
++ d = *pa++; |
++ sum = d + *pb++; |
++ d = (sum < d); /* detect overflow */ |
++ *pc++ = sum += carry; |
++ carry = d + (sum < carry); /* detect overflow */ |
++#endif |
++ } |
++ |
++ /* If we run out of 'b' digits before we're actually done, make |
++ sure the carries get propagated upward... |
++ */ |
++ for (used = MP_USED(a); ix < used; ++ix) { |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_ADD_WORD) |
++ w = w + *pa++; |
++ *pc++ = ACCUM(w); |
++ w = CARRYOUT(w); |
++#else |
++ *pc++ = sum = carry + *pa++; |
++ carry = (sum < carry); |
++#endif |
++ } |
++ |
++ /* If there's an overall carry out, increase precision and include |
++ it. We could have done this initially, but why touch the memory |
++ allocator unless we're sure we have to? |
++ */ |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_ADD_WORD) |
++ if (w) { |
++ if((res = s_mp_pad(c, used + 1)) != MP_OKAY) |
++ return res; |
++ |
++ DIGIT(c, used) = (mp_digit)w; |
++ ++used; |
++ } |
++#else |
++ if (carry) { |
++ if((res = s_mp_pad(c, used + 1)) != MP_OKAY) |
++ return res; |
++ |
++ DIGIT(c, used) = carry; |
++ ++used; |
++ } |
++#endif |
++ MP_USED(c) = used; |
++ return MP_OKAY; |
++} |
++/* {{{ s_mp_add_offset(a, b, offset) */ |
++ |
++/* Compute a = |a| + ( |b| * (RADIX ** offset) ) */ |
++mp_err s_mp_add_offset(mp_int *a, mp_int *b, mp_size offset) |
++{ |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_ADD_WORD) |
++ mp_word w, k = 0; |
++#else |
++ mp_digit d, sum, carry = 0; |
++#endif |
++ mp_size ib; |
++ mp_size ia; |
++ mp_size lim; |
++ mp_err res; |
++ |
++ /* Make sure a has enough precision for the output value */ |
++ lim = MP_USED(b) + offset; |
++ if((lim > USED(a)) && (res = s_mp_pad(a, lim)) != MP_OKAY) |
++ return res; |
++ |
++ /* |
++ Add up all digits up to the precision of b. If b had initially |
++ the same precision as a, or greater, we took care of it by the |
++ padding step above, so there is no problem. If b had initially |
++ less precision, we'll have to make sure the carry out is duly |
++ propagated upward among the higher-order digits of the sum. |
++ */ |
++ lim = USED(b); |
++ for(ib = 0, ia = offset; ib < lim; ib++, ia++) { |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_ADD_WORD) |
++ w = (mp_word)DIGIT(a, ia) + DIGIT(b, ib) + k; |
++ DIGIT(a, ia) = ACCUM(w); |
++ k = CARRYOUT(w); |
++#else |
++ d = MP_DIGIT(a, ia); |
++ sum = d + MP_DIGIT(b, ib); |
++ d = (sum < d); |
++ MP_DIGIT(a,ia) = sum += carry; |
++ carry = d + (sum < carry); |
++#endif |
++ } |
++ |
++ /* If we run out of 'b' digits before we're actually done, make |
++ sure the carries get propagated upward... |
++ */ |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_ADD_WORD) |
++ for (lim = MP_USED(a); k && (ia < lim); ++ia) { |
++ w = (mp_word)DIGIT(a, ia) + k; |
++ DIGIT(a, ia) = ACCUM(w); |
++ k = CARRYOUT(w); |
++ } |
++#else |
++ for (lim = MP_USED(a); carry && (ia < lim); ++ia) { |
++ d = MP_DIGIT(a, ia); |
++ MP_DIGIT(a,ia) = sum = d + carry; |
++ carry = (sum < d); |
++ } |
++#endif |
++ |
++ /* If there's an overall carry out, increase precision and include |
++ it. We could have done this initially, but why touch the memory |
++ allocator unless we're sure we have to? |
++ */ |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_ADD_WORD) |
++ if(k) { |
++ if((res = s_mp_pad(a, USED(a) + 1)) != MP_OKAY) |
++ return res; |
++ |
++ DIGIT(a, ia) = (mp_digit)k; |
++ } |
++#else |
++ if (carry) { |
++ if((res = s_mp_pad(a, lim + 1)) != MP_OKAY) |
++ return res; |
++ |
++ DIGIT(a, lim) = carry; |
++ } |
++#endif |
++ s_mp_clamp(a); |
++ |
++ return MP_OKAY; |
++ |
++} /* end s_mp_add_offset() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_sub(a, b) */ |
++ |
++/* Compute a = |a| - |b|, assumes |a| >= |b| */ |
++mp_err s_mp_sub(mp_int *a, const mp_int *b) /* magnitude subtract */ |
++{ |
++ mp_digit *pa, *pb, *limit; |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_SUB_WORD) |
++ mp_sword w = 0; |
++#else |
++ mp_digit d, diff, borrow = 0; |
++#endif |
++ |
++ /* |
++ Subtract and propagate borrow. Up to the precision of b, this |
++ accounts for the digits of b; after that, we just make sure the |
++ carries get to the right place. This saves having to pad b out to |
++ the precision of a just to make the loops work right... |
++ */ |
++ pa = MP_DIGITS(a); |
++ pb = MP_DIGITS(b); |
++ limit = pb + MP_USED(b); |
++ while (pb < limit) { |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_SUB_WORD) |
++ w = w + *pa - *pb++; |
++ *pa++ = ACCUM(w); |
++ w >>= MP_DIGIT_BIT; |
++#else |
++ d = *pa; |
++ diff = d - *pb++; |
++ d = (diff > d); /* detect borrow */ |
++ if (borrow && --diff == MP_DIGIT_MAX) |
++ ++d; |
++ *pa++ = diff; |
++ borrow = d; |
++#endif |
++ } |
++ limit = MP_DIGITS(a) + MP_USED(a); |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_SUB_WORD) |
++ while (w && pa < limit) { |
++ w = w + *pa; |
++ *pa++ = ACCUM(w); |
++ w >>= MP_DIGIT_BIT; |
++ } |
++#else |
++ while (borrow && pa < limit) { |
++ d = *pa; |
++ *pa++ = diff = d - borrow; |
++ borrow = (diff > d); |
++ } |
++#endif |
++ |
++ /* Clobber any leading zeroes we created */ |
++ s_mp_clamp(a); |
++ |
++ /* |
++ If there was a borrow out, then |b| > |a| in violation |
++ of our input invariant. We've already done the work, |
++ but we'll at least complain about it... |
++ */ |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_SUB_WORD) |
++ return w ? MP_RANGE : MP_OKAY; |
++#else |
++ return borrow ? MP_RANGE : MP_OKAY; |
++#endif |
++} /* end s_mp_sub() */ |
++ |
++/* }}} */ |
++ |
++/* Compute c = |a| - |b|, assumes |a| >= |b| */ /* magnitude subtract */ |
++mp_err s_mp_sub_3arg(const mp_int *a, const mp_int *b, mp_int *c) |
++{ |
++ mp_digit *pa, *pb, *pc; |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_SUB_WORD) |
++ mp_sword w = 0; |
++#else |
++ mp_digit d, diff, borrow = 0; |
++#endif |
++ int ix, limit; |
++ mp_err res; |
++ |
++ MP_SIGN(c) = MP_SIGN(a); |
++ |
++ /* Make sure a has enough precision for the output value */ |
++ if (MP_OKAY != (res = s_mp_pad(c, MP_USED(a)))) |
++ return res; |
++ |
++ /* |
++ Subtract and propagate borrow. Up to the precision of b, this |
++ accounts for the digits of b; after that, we just make sure the |
++ carries get to the right place. This saves having to pad b out to |
++ the precision of a just to make the loops work right... |
++ */ |
++ pa = MP_DIGITS(a); |
++ pb = MP_DIGITS(b); |
++ pc = MP_DIGITS(c); |
++ limit = MP_USED(b); |
++ for (ix = 0; ix < limit; ++ix) { |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_SUB_WORD) |
++ w = w + *pa++ - *pb++; |
++ *pc++ = ACCUM(w); |
++ w >>= MP_DIGIT_BIT; |
++#else |
++ d = *pa++; |
++ diff = d - *pb++; |
++ d = (diff > d); |
++ if (borrow && --diff == MP_DIGIT_MAX) |
++ ++d; |
++ *pc++ = diff; |
++ borrow = d; |
++#endif |
++ } |
++ for (limit = MP_USED(a); ix < limit; ++ix) { |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_SUB_WORD) |
++ w = w + *pa++; |
++ *pc++ = ACCUM(w); |
++ w >>= MP_DIGIT_BIT; |
++#else |
++ d = *pa++; |
++ *pc++ = diff = d - borrow; |
++ borrow = (diff > d); |
++#endif |
++ } |
++ |
++ /* Clobber any leading zeroes we created */ |
++ MP_USED(c) = ix; |
++ s_mp_clamp(c); |
++ |
++ /* |
++ If there was a borrow out, then |b| > |a| in violation |
++ of our input invariant. We've already done the work, |
++ but we'll at least complain about it... |
++ */ |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_SUB_WORD) |
++ return w ? MP_RANGE : MP_OKAY; |
++#else |
++ return borrow ? MP_RANGE : MP_OKAY; |
++#endif |
++} |
++/* {{{ s_mp_mul(a, b) */ |
++ |
++/* Compute a = |a| * |b| */ |
++mp_err s_mp_mul(mp_int *a, const mp_int *b) |
++{ |
++ return mp_mul(a, b, a); |
++} /* end s_mp_mul() */ |
++ |
++/* }}} */ |
++ |
++#if defined(MP_USE_UINT_DIGIT) && defined(MP_USE_LONG_LONG_MULTIPLY) |
++/* This trick works on Sparc V8 CPUs with the Workshop compilers. */ |
++#define MP_MUL_DxD(a, b, Phi, Plo) \ |
++ { unsigned long long product = (unsigned long long)a * b; \ |
++ Plo = (mp_digit)product; \ |
++ Phi = (mp_digit)(product >> MP_DIGIT_BIT); } |
++#elif defined(OSF1) |
++#define MP_MUL_DxD(a, b, Phi, Plo) \ |
++ { Plo = asm ("mulq %a0, %a1, %v0", a, b);\ |
++ Phi = asm ("umulh %a0, %a1, %v0", a, b); } |
++#else |
++#define MP_MUL_DxD(a, b, Phi, Plo) \ |
++ { mp_digit a0b1, a1b0; \ |
++ Plo = (a & MP_HALF_DIGIT_MAX) * (b & MP_HALF_DIGIT_MAX); \ |
++ Phi = (a >> MP_HALF_DIGIT_BIT) * (b >> MP_HALF_DIGIT_BIT); \ |
++ a0b1 = (a & MP_HALF_DIGIT_MAX) * (b >> MP_HALF_DIGIT_BIT); \ |
++ a1b0 = (a >> MP_HALF_DIGIT_BIT) * (b & MP_HALF_DIGIT_MAX); \ |
++ a1b0 += a0b1; \ |
++ Phi += a1b0 >> MP_HALF_DIGIT_BIT; \ |
++ if (a1b0 < a0b1) \ |
++ Phi += MP_HALF_RADIX; \ |
++ a1b0 <<= MP_HALF_DIGIT_BIT; \ |
++ Plo += a1b0; \ |
++ if (Plo < a1b0) \ |
++ ++Phi; \ |
++ } |
++#endif |
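++
++/*
++  The portable MP_MUL_DxD fallback above is schoolbook multiplication on
++  half digits.  Writing H = 2^MP_HALF_DIGIT_BIT, a = a1*H + a0 and
++  b = b1*H + b0, the full product is
++
++      a*b = a1*b1*H^2 + (a1*b0 + a0*b1)*H + a0*b0,
++
++  so Phi starts as a1*b1 and absorbs the high halves of the cross terms
++  (plus their carries), while Plo starts as a0*b0 and absorbs the low
++  halves of the cross terms.
++ */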
++ |
++#if !defined(MP_ASSEMBLY_MULTIPLY) |
++/* c = a * b */ |
++void s_mpv_mul_d(const mp_digit *a, mp_size a_len, mp_digit b, mp_digit *c) |
++{ |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_MUL_WORD) |
++ mp_digit d = 0; |
++ |
++ /* Inner product: Digits of a */ |
++ while (a_len--) { |
++ mp_word w = ((mp_word)b * *a++) + d; |
++ *c++ = ACCUM(w); |
++ d = CARRYOUT(w); |
++ } |
++ *c = d; |
++#else |
++ mp_digit carry = 0; |
++ while (a_len--) { |
++ mp_digit a_i = *a++; |
++ mp_digit a0b0, a1b1; |
++ |
++ MP_MUL_DxD(a_i, b, a1b1, a0b0); |
++ |
++ a0b0 += carry; |
++ if (a0b0 < carry) |
++ ++a1b1; |
++ *c++ = a0b0; |
++ carry = a1b1; |
++ } |
++ *c = carry; |
++#endif |
++} |
++ |
++/* c += a * b */ |
++void s_mpv_mul_d_add(const mp_digit *a, mp_size a_len, mp_digit b, |
++ mp_digit *c) |
++{ |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_MUL_WORD) |
++ mp_digit d = 0; |
++ |
++ /* Inner product: Digits of a */ |
++ while (a_len--) { |
++ mp_word w = ((mp_word)b * *a++) + *c + d; |
++ *c++ = ACCUM(w); |
++ d = CARRYOUT(w); |
++ } |
++ *c = d; |
++#else |
++ mp_digit carry = 0; |
++ while (a_len--) { |
++ mp_digit a_i = *a++; |
++ mp_digit a0b0, a1b1; |
++ |
++ MP_MUL_DxD(a_i, b, a1b1, a0b0); |
++ |
++ a0b0 += carry; |
++ if (a0b0 < carry) |
++ ++a1b1; |
++ a0b0 += a_i = *c; |
++ if (a0b0 < a_i) |
++ ++a1b1; |
++ *c++ = a0b0; |
++ carry = a1b1; |
++ } |
++ *c = carry; |
++#endif |
++} |
++ |
++/* Presently, this is only used by the Montgomery arithmetic code. */ |
++/* c += a * b */ |
++void s_mpv_mul_d_add_prop(const mp_digit *a, mp_size a_len, mp_digit b, mp_digit *c) |
++{ |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_MUL_WORD) |
++ mp_digit d = 0; |
++ |
++ /* Inner product: Digits of a */ |
++ while (a_len--) { |
++ mp_word w = ((mp_word)b * *a++) + *c + d; |
++ *c++ = ACCUM(w); |
++ d = CARRYOUT(w); |
++ } |
++ |
++ while (d) { |
++ mp_word w = (mp_word)*c + d; |
++ *c++ = ACCUM(w); |
++ d = CARRYOUT(w); |
++ } |
++#else |
++ mp_digit carry = 0; |
++ while (a_len--) { |
++ mp_digit a_i = *a++; |
++ mp_digit a0b0, a1b1; |
++ |
++ MP_MUL_DxD(a_i, b, a1b1, a0b0); |
++ |
++ a0b0 += carry; |
++ if (a0b0 < carry) |
++ ++a1b1; |
++ |
++ a0b0 += a_i = *c; |
++ if (a0b0 < a_i) |
++ ++a1b1; |
++ |
++ *c++ = a0b0; |
++ carry = a1b1; |
++ } |
++ while (carry) { |
++ mp_digit c_i = *c; |
++ carry += c_i; |
++ *c++ = carry; |
++ carry = carry < c_i; |
++ } |
++#endif |
++} |
++#endif |
++ |
++#if defined(MP_USE_UINT_DIGIT) && defined(MP_USE_LONG_LONG_MULTIPLY) |
++/* This trick works on Sparc V8 CPUs with the Workshop compilers. */ |
++#define MP_SQR_D(a, Phi, Plo) \ |
++ { unsigned long long square = (unsigned long long)a * a; \ |
++ Plo = (mp_digit)square; \ |
++ Phi = (mp_digit)(square >> MP_DIGIT_BIT); } |
++#elif defined(OSF1) |
++#define MP_SQR_D(a, Phi, Plo) \ |
++ { Plo = asm ("mulq %a0, %a0, %v0", a);\ |
++ Phi = asm ("umulh %a0, %a0, %v0", a); } |
++#else |
++#define MP_SQR_D(a, Phi, Plo) \ |
++ { mp_digit Pmid; \ |
++ Plo = (a & MP_HALF_DIGIT_MAX) * (a & MP_HALF_DIGIT_MAX); \ |
++ Phi = (a >> MP_HALF_DIGIT_BIT) * (a >> MP_HALF_DIGIT_BIT); \ |
++ Pmid = (a & MP_HALF_DIGIT_MAX) * (a >> MP_HALF_DIGIT_BIT); \ |
++ Phi += Pmid >> (MP_HALF_DIGIT_BIT - 1); \ |
++ Pmid <<= (MP_HALF_DIGIT_BIT + 1); \ |
++ Plo += Pmid; \ |
++ if (Plo < Pmid) \ |
++ ++Phi; \ |
++ } |
++#endif |
++ |
++#if !defined(MP_ASSEMBLY_SQUARE) |
++/* Add the squares of the digits of a to the digits of b. */ |
++void s_mpv_sqr_add_prop(const mp_digit *pa, mp_size a_len, mp_digit *ps) |
++{ |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_MUL_WORD) |
++ mp_word w; |
++ mp_digit d; |
++ mp_size ix; |
++ |
++ w = 0; |
++#define ADD_SQUARE(n) \ |
++ d = pa[n]; \ |
++ w += (d * (mp_word)d) + ps[2*n]; \ |
++ ps[2*n] = ACCUM(w); \ |
++ w = (w >> DIGIT_BIT) + ps[2*n+1]; \ |
++ ps[2*n+1] = ACCUM(w); \ |
++ w = (w >> DIGIT_BIT) |
++ |
++ for (ix = a_len; ix >= 4; ix -= 4) { |
++ ADD_SQUARE(0); |
++ ADD_SQUARE(1); |
++ ADD_SQUARE(2); |
++ ADD_SQUARE(3); |
++ pa += 4; |
++ ps += 8; |
++ } |
++ if (ix) { |
++ ps += 2*ix; |
++ pa += ix; |
++ switch (ix) { |
++ case 3: ADD_SQUARE(-3); /* FALLTHRU */ |
++ case 2: ADD_SQUARE(-2); /* FALLTHRU */ |
++ case 1: ADD_SQUARE(-1); /* FALLTHRU */ |
++ case 0: break; |
++ } |
++ } |
++ while (w) { |
++ w += *ps; |
++ *ps++ = ACCUM(w); |
++ w = (w >> DIGIT_BIT); |
++ } |
++#else |
++ mp_digit carry = 0; |
++ while (a_len--) { |
++ mp_digit a_i = *pa++; |
++ mp_digit a0a0, a1a1; |
++ |
++ MP_SQR_D(a_i, a1a1, a0a0); |
++ |
++ /* here a1a1 and a0a0 constitute a_i ** 2 */ |
++ a0a0 += carry; |
++ if (a0a0 < carry) |
++ ++a1a1; |
++ |
++ /* now add to ps */ |
++ a0a0 += a_i = *ps; |
++ if (a0a0 < a_i) |
++ ++a1a1; |
++ *ps++ = a0a0; |
++ a1a1 += a_i = *ps; |
++ carry = (a1a1 < a_i); |
++ *ps++ = a1a1; |
++ } |
++ while (carry) { |
++ mp_digit s_i = *ps; |
++ carry += s_i; |
++ *ps++ = carry; |
++ carry = carry < s_i; |
++ } |
++#endif |
++} |
++#endif |
++ |
++#if (defined(MP_NO_MP_WORD) || defined(MP_NO_DIV_WORD)) \ |
++&& !defined(MP_ASSEMBLY_DIV_2DX1D) |
++/* |
++** Divide 64-bit (Nhi,Nlo) by 32-bit divisor, which must be normalized |
++** so its high bit is 1. This code is from NSPR. |
++*/ |
++mp_err s_mpv_div_2dx1d(mp_digit Nhi, mp_digit Nlo, mp_digit divisor, |
++ mp_digit *qp, mp_digit *rp) |
++{ |
++ mp_digit d1, d0, q1, q0; |
++ mp_digit r1, r0, m; |
++ |
++ d1 = divisor >> MP_HALF_DIGIT_BIT; |
++ d0 = divisor & MP_HALF_DIGIT_MAX; |
++ r1 = Nhi % d1; |
++ q1 = Nhi / d1; |
++ m = q1 * d0; |
++ r1 = (r1 << MP_HALF_DIGIT_BIT) | (Nlo >> MP_HALF_DIGIT_BIT); |
++ if (r1 < m) { |
++ q1--, r1 += divisor; |
++ if (r1 >= divisor && r1 < m) { |
++ q1--, r1 += divisor; |
++ } |
++ } |
++ r1 -= m; |
++ r0 = r1 % d1; |
++ q0 = r1 / d1; |
++ m = q0 * d0; |
++ r0 = (r0 << MP_HALF_DIGIT_BIT) | (Nlo & MP_HALF_DIGIT_MAX); |
++ if (r0 < m) { |
++ q0--, r0 += divisor; |
++ if (r0 >= divisor && r0 < m) { |
++ q0--, r0 += divisor; |
++ } |
++ } |
++ if (qp) |
++ *qp = (q1 << MP_HALF_DIGIT_BIT) | q0; |
++ if (rp) |
++ *rp = r0 - m; |
++ return MP_OKAY; |
++} |
++#endif |
++ |
++#if MP_SQUARE |
++/* {{{ s_mp_sqr(a) */ |
++ |
++mp_err s_mp_sqr(mp_int *a) |
++{ |
++ mp_err res; |
++ mp_int tmp; |
++ |
++ if((res = mp_init_size(&tmp, 2 * USED(a))) != MP_OKAY) |
++ return res; |
++ res = mp_sqr(a, &tmp); |
++ if (res == MP_OKAY) { |
++ s_mp_exch(&tmp, a); |
++ } |
++ mp_clear(&tmp); |
++ return res; |
++} |
++ |
++/* }}} */ |
++#endif |
++ |
++/* {{{ s_mp_div(a, b) */ |
++ |
++/* |
++ s_mp_div(a, b) |
++ |
++ Compute a = a / b and b = a mod b. Assumes b > a. |
++ */ |
++ |
++mp_err s_mp_div(mp_int *rem, /* i: dividend, o: remainder */ |
++ mp_int *div, /* i: divisor */ |
++ mp_int *quot) /* i: 0; o: quotient */ |
++{ |
++ mp_int part, t; |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_DIV_WORD) |
++ mp_word q_msd; |
++#else |
++ mp_digit q_msd; |
++#endif |
++ mp_err res; |
++ mp_digit d; |
++ mp_digit div_msd; |
++ int ix; |
++ |
++ if(mp_cmp_z(div) == 0) |
++ return MP_RANGE; |
++ |
++ /* Shortcut if divisor is power of two */ |
++ if((ix = s_mp_ispow2(div)) >= 0) { |
++ MP_CHECKOK( mp_copy(rem, quot) ); |
++ s_mp_div_2d(quot, (mp_digit)ix); |
++ s_mp_mod_2d(rem, (mp_digit)ix); |
++ |
++ return MP_OKAY; |
++ } |
++ |
++ DIGITS(&t) = 0; |
++ MP_SIGN(rem) = ZPOS; |
++ MP_SIGN(div) = ZPOS; |
++ |
++ /* A working temporary for division */ |
++ MP_CHECKOK( mp_init_size(&t, MP_ALLOC(rem))); |
++ |
++ /* Normalize to optimize guessing */ |
++ MP_CHECKOK( s_mp_norm(rem, div, &d) ); |
++ |
++ part = *rem; |
++ |
++ /* Perform the division itself...woo! */ |
++ MP_USED(quot) = MP_ALLOC(quot); |
++ |
++ /* Find a partial substring of rem which is at least div */ |
++ /* If we didn't find one, we're finished dividing */ |
++ while (MP_USED(rem) > MP_USED(div) || s_mp_cmp(rem, div) >= 0) { |
++ int i; |
++ int unusedRem; |
++ |
++ unusedRem = MP_USED(rem) - MP_USED(div); |
++ MP_DIGITS(&part) = MP_DIGITS(rem) + unusedRem; |
++ MP_ALLOC(&part) = MP_ALLOC(rem) - unusedRem; |
++ MP_USED(&part) = MP_USED(div); |
++ if (s_mp_cmp(&part, div) < 0) { |
++ -- unusedRem; |
++#if MP_ARGCHK == 2 |
++ assert(unusedRem >= 0); |
++#endif |
++ -- MP_DIGITS(&part); |
++ ++ MP_USED(&part); |
++ ++ MP_ALLOC(&part); |
++ } |
++ |
++ /* Compute a guess for the next quotient digit */ |
++ q_msd = MP_DIGIT(&part, MP_USED(&part) - 1); |
++ div_msd = MP_DIGIT(div, MP_USED(div) - 1); |
++ if (q_msd >= div_msd) { |
++ q_msd = 1; |
++ } else if (MP_USED(&part) > 1) { |
++#if !defined(MP_NO_MP_WORD) && !defined(MP_NO_DIV_WORD) |
++ q_msd = (q_msd << MP_DIGIT_BIT) | MP_DIGIT(&part, MP_USED(&part) - 2); |
++ q_msd /= div_msd; |
++ if (q_msd == RADIX) |
++ --q_msd; |
++#else |
++ mp_digit r; |
++ MP_CHECKOK( s_mpv_div_2dx1d(q_msd, MP_DIGIT(&part, MP_USED(&part) - 2), |
++ div_msd, &q_msd, &r) ); |
++#endif |
++ } else { |
++ q_msd = 0; |
++ } |
++#if MP_ARGCHK == 2 |
++ assert(q_msd > 0); /* This case should never occur any more. */ |
++#endif |
++ if (q_msd <= 0) |
++ break; |
++ |
++ /* See what that multiplies out to */ |
++ mp_copy(div, &t); |
++ MP_CHECKOK( s_mp_mul_d(&t, (mp_digit)q_msd) ); |
++ |
++ /* |
++ If it's too big, back it off. We should not have to do this |
++ more than once, or, in rare cases, twice. Knuth describes a |
++ method by which this could be reduced to a maximum of once, but |
++ I didn't implement that here. |
++ * When using s_mpv_div_2dx1d, we may have to do this 3 times. |
++ */ |
++ for (i = 4; s_mp_cmp(&t, &part) > 0 && i > 0; --i) { |
++ --q_msd; |
++ s_mp_sub(&t, div); /* t -= div */ |
++ } |
++ if (i < 0) { |
++ res = MP_RANGE; |
++ goto CLEANUP; |
++ } |
++ |
++ /* At this point, q_msd should be the right next digit */ |
++ MP_CHECKOK( s_mp_sub(&part, &t) ); /* part -= t */ |
++ s_mp_clamp(rem); |
++ |
++ /* |
++ Include the digit in the quotient. We allocated enough memory |
++ for any quotient we could ever possibly get, so we should not |
++ have to check for failures here |
++ */ |
++ MP_DIGIT(quot, unusedRem) = (mp_digit)q_msd; |
++ } |
++ |
++ /* Denormalize remainder */ |
++ if (d) { |
++ s_mp_div_2d(rem, d); |
++ } |
++ |
++ s_mp_clamp(quot); |
++ |
++CLEANUP: |
++ mp_clear(&t); |
++ |
++ return res; |
++ |
++} /* end s_mp_div() */ |
++ |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_2expt(a, k) */ |
++ |
++mp_err s_mp_2expt(mp_int *a, mp_digit k) |
++{ |
++ mp_err res; |
++ mp_size dig, bit; |
++ |
++ dig = k / DIGIT_BIT; |
++ bit = k % DIGIT_BIT; |
++ |
++ mp_zero(a); |
++ if((res = s_mp_pad(a, dig + 1)) != MP_OKAY) |
++ return res; |
++ |
++ DIGIT(a, dig) |= ((mp_digit)1 << bit); |
++ |
++ return MP_OKAY; |
++ |
++} /* end s_mp_2expt() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_reduce(x, m, mu) */ |
++ |
++/* |
++ Compute Barrett reduction, x (mod m), given a precomputed value for |
++ mu = b^2k / m, where b = RADIX and k = #digits(m). This should be |
++ faster than straight division, when many reductions by the same |
++ value of m are required (such as in modular exponentiation). This |
++ can nearly halve the time required to do modular exponentiation, |
++ as compared to using the full integer divide to reduce. |
++ |
++ This algorithm was derived from the _Handbook of Applied |
++ Cryptography_ by Menezes, Oorschot and VanStone, Ch. 14, |
++ pp. 603-604. |
++ */ |
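++/*
++  Sketch of why the trailing corrections suffice: with b = RADIX,
++  k = USED(m) and mu = b^(2k) / m, the estimate
++  q3 = ((x / b^(k-1)) * mu) / b^(k+1) computed below satisfies
++  q - 2 <= q3 <= q for the true quotient q = x / m (all divisions
++  truncating, 0 <= x < b^(2k)), so once x - q3*m has been formed
++  (mod b^(k+1)), at most two subtractions of m in the final while
++  loop bring the result into [0, m).
++ */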
++ |
++mp_err s_mp_reduce(mp_int *x, const mp_int *m, const mp_int *mu) |
++{ |
++ mp_int q; |
++ mp_err res; |
++ |
++ if((res = mp_init_copy(&q, x)) != MP_OKAY) |
++ return res; |
++ |
++ s_mp_rshd(&q, USED(m) - 1); /* q1 = x / b^(k-1) */ |
++ s_mp_mul(&q, mu); /* q2 = q1 * mu */ |
++ s_mp_rshd(&q, USED(m) + 1); /* q3 = q2 / b^(k+1) */ |
++ |
++ /* x = x mod b^(k+1), quick (no division) */ |
++ s_mp_mod_2d(x, DIGIT_BIT * (USED(m) + 1)); |
++ |
++ /* q = q * m mod b^(k+1), quick (no division) */ |
++ s_mp_mul(&q, m); |
++ s_mp_mod_2d(&q, DIGIT_BIT * (USED(m) + 1)); |
++ |
++ /* x = x - q */ |
++ if((res = mp_sub(x, &q, x)) != MP_OKAY) |
++ goto CLEANUP; |
++ |
++ /* If x < 0, add b^(k+1) to it */ |
++ if(mp_cmp_z(x) < 0) { |
++ mp_set(&q, 1); |
++ if((res = s_mp_lshd(&q, USED(m) + 1)) != MP_OKAY) |
++ goto CLEANUP; |
++ if((res = mp_add(x, &q, x)) != MP_OKAY) |
++ goto CLEANUP; |
++ } |
++ |
++ /* Back off if it's too big */ |
++ while(mp_cmp(x, m) >= 0) { |
++ if((res = s_mp_sub(x, m)) != MP_OKAY) |
++ break; |
++ } |
++ |
++ CLEANUP: |
++ mp_clear(&q); |
++ |
++ return res; |
++ |
++} /* end s_mp_reduce() */ |
++ |
++/* }}} */ |
++ |
++/* }}} */ |
++ |
++/* {{{ Primitive comparisons */ |
++ |
++/* {{{ s_mp_cmp(a, b) */ |
++ |
++/* Compare |a| <=> |b|, return 0 if equal, <0 if a<b, >0 if a>b */ |
++int s_mp_cmp(const mp_int *a, const mp_int *b) |
++{ |
++ mp_size used_a = MP_USED(a); |
++ { |
++ mp_size used_b = MP_USED(b); |
++ |
++ if (used_a > used_b) |
++ goto IS_GT; |
++ if (used_a < used_b) |
++ goto IS_LT; |
++ } |
++ { |
++ mp_digit *pa, *pb; |
++ mp_digit da = 0, db = 0; |
++ |
++#define CMP_AB(n) if ((da = pa[n]) != (db = pb[n])) goto done |
++ |
++ pa = MP_DIGITS(a) + used_a; |
++ pb = MP_DIGITS(b) + used_a; |
++ while (used_a >= 4) { |
++ pa -= 4; |
++ pb -= 4; |
++ used_a -= 4; |
++ CMP_AB(3); |
++ CMP_AB(2); |
++ CMP_AB(1); |
++ CMP_AB(0); |
++ } |
++ while (used_a-- > 0 && ((da = *--pa) == (db = *--pb))) |
++ /* do nothing */; |
++done: |
++ if (da > db) |
++ goto IS_GT; |
++ if (da < db) |
++ goto IS_LT; |
++ } |
++ return MP_EQ; |
++IS_LT: |
++ return MP_LT; |
++IS_GT: |
++ return MP_GT; |
++} /* end s_mp_cmp() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_cmp_d(a, d) */ |
++ |
++/* Compare |a| <=> d, return 0 if equal, <0 if a<d, >0 if a>d */ |
++int s_mp_cmp_d(const mp_int *a, mp_digit d) |
++{ |
++ if(USED(a) > 1) |
++ return MP_GT; |
++ |
++ if(DIGIT(a, 0) < d) |
++ return MP_LT; |
++ else if(DIGIT(a, 0) > d) |
++ return MP_GT; |
++ else |
++ return MP_EQ; |
++ |
++} /* end s_mp_cmp_d() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_ispow2(v) */ |
++ |
++/* |
++ Returns -1 if the value is not a power of two; otherwise, it returns |
++ k such that v = 2^k, i.e. lg(v). |
++ */ |
++int s_mp_ispow2(const mp_int *v) |
++{ |
++ mp_digit d; |
++ int extra = 0, ix; |
++ |
++ ix = MP_USED(v) - 1; |
++ d = MP_DIGIT(v, ix); /* most significant digit of v */ |
++ |
++ extra = s_mp_ispow2d(d); |
++ if (extra < 0 || ix == 0) |
++ return extra; |
++ |
++ while (--ix >= 0) { |
++ if (DIGIT(v, ix) != 0) |
++ return -1; /* not a power of two */ |
++ extra += MP_DIGIT_BIT; |
++ } |
++ |
++ return extra; |
++ |
++} /* end s_mp_ispow2() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_ispow2d(d) */ |
++ |
++int s_mp_ispow2d(mp_digit d) |
++{ |
++ if ((d != 0) && ((d & (d-1)) == 0)) { /* d is a power of 2 */ |
++ int pow = 0; |
++#if defined (MP_USE_UINT_DIGIT) |
++ if (d & 0xffff0000U) |
++ pow += 16; |
++ if (d & 0xff00ff00U) |
++ pow += 8; |
++ if (d & 0xf0f0f0f0U) |
++ pow += 4; |
++ if (d & 0xccccccccU) |
++ pow += 2; |
++ if (d & 0xaaaaaaaaU) |
++ pow += 1; |
++#elif defined(MP_USE_LONG_LONG_DIGIT) |
++ if (d & 0xffffffff00000000ULL) |
++ pow += 32; |
++ if (d & 0xffff0000ffff0000ULL) |
++ pow += 16; |
++ if (d & 0xff00ff00ff00ff00ULL) |
++ pow += 8; |
++ if (d & 0xf0f0f0f0f0f0f0f0ULL) |
++ pow += 4; |
++ if (d & 0xccccccccccccccccULL) |
++ pow += 2; |
++ if (d & 0xaaaaaaaaaaaaaaaaULL) |
++ pow += 1; |
++#elif defined(MP_USE_LONG_DIGIT) |
++ if (d & 0xffffffff00000000UL) |
++ pow += 32; |
++ if (d & 0xffff0000ffff0000UL) |
++ pow += 16; |
++ if (d & 0xff00ff00ff00ff00UL) |
++ pow += 8; |
++ if (d & 0xf0f0f0f0f0f0f0f0UL) |
++ pow += 4; |
++ if (d & 0xccccccccccccccccUL) |
++ pow += 2; |
++ if (d & 0xaaaaaaaaaaaaaaaaUL) |
++ pow += 1; |
++#else |
++#error "unknown type for mp_digit" |
++#endif |
++ return pow; |
++ } |
++ return -1; |
++ |
++} /* end s_mp_ispow2d() */ |
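++
++/*
++  How the mask cascade in s_mp_ispow2d() works: once d is known to have
++  a single set bit, each mask selects the bit positions whose index has
++  a particular binary digit set, so the summed increments reconstruct
++  that index.  For example (32-bit digits), d = 0x00004000 = 2^14 adds
++  0 + 8 + 4 + 2 + 0 = 14.
++ */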
++ |
++/* }}} */ |
++ |
++/* }}} */ |
++ |
++/* {{{ Primitive I/O helpers */ |
++ |
++/* {{{ s_mp_tovalue(ch, r) */ |
++ |
++/* |
++ Convert the given character to its digit value, in the given radix. |
++ If the given character is not understood in the given radix, -1 is |
++ returned. Otherwise the digit's numeric value is returned. |
++ |
++ The results will be odd if you use a radix < 2 or > 62, you are |
++ expected to know what you're up to. |
++ */ |
++int s_mp_tovalue(char ch, int r) |
++{ |
++ int val, xch; |
++ |
++ if(r > 36) |
++ xch = ch; |
++ else |
++ xch = toupper(ch); |
++ |
++ if(isdigit(xch)) |
++ val = xch - '0'; |
++ else if(isupper(xch)) |
++ val = xch - 'A' + 10; |
++ else if(islower(xch)) |
++ val = xch - 'a' + 36; |
++ else if(xch == '+') |
++ val = 62; |
++ else if(xch == '/') |
++ val = 63; |
++ else |
++ return -1; |
++ |
++ if(val < 0 || val >= r) |
++ return -1; |
++ |
++ return val; |
++ |
++} /* end s_mp_tovalue() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_todigit(val, r, low) */ |
++ |
++/* |
++ Convert val to a radix-r digit, if possible. If val is out of range |
++ for r, returns zero. Otherwise, returns an ASCII character denoting |
++ the value in the given radix. |
++ |
++ The results may be odd if you use a radix < 2 or > 64, you are |
++ expected to know what you're doing. |
++ */ |
++ |
++char s_mp_todigit(mp_digit val, int r, int low) |
++{ |
++ char ch; |
++ |
++ if(val >= r) |
++ return 0; |
++ |
++ ch = s_dmap_1[val]; |
++ |
++ if(r <= 36 && low) |
++ ch = tolower(ch); |
++ |
++ return ch; |
++ |
++} /* end s_mp_todigit() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ s_mp_outlen(bits, radix) */ |
++ |
++/* |
++ Return an estimate for how long a string is needed to hold a radix |
++ r representation of a number with 'bits' significant bits, plus an |
++ extra for a zero terminator (assuming C style strings here) |
++ */ |
++int s_mp_outlen(int bits, int r) |
++{ |
++ return (int)((double)bits * LOG_V_2(r) + 1.5) + 1; |
++ |
++} /* end s_mp_outlen() */ |
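++
++/*
++  Worked example, assuming LOG_V_2(r) expands to a table of log_r(2)
++  values: a 256-bit number printed in radix 10 gets
++  (int)(256 * 0.30103 + 1.5) + 1 = 79 characters, which covers the 78
++  decimal digits of a value as large as 2^256 plus a terminating NUL.
++ */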
++ |
++/* }}} */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mp_read_unsigned_octets(mp, str, len) */ |
++/* mp_read_unsigned_octets(mp, str, len) |
++ Read in a raw value (base 256) into the given mp_int |
++ No sign bit, number is positive. Leading zeros ignored. |
++ */ |
++ |
++mp_err |
++mp_read_unsigned_octets(mp_int *mp, const unsigned char *str, mp_size len) |
++{ |
++ int count; |
++ mp_err res; |
++ mp_digit d; |
++ |
++ ARGCHK(mp != NULL && str != NULL && len > 0, MP_BADARG); |
++ |
++ mp_zero(mp); |
++ |
++ count = len % sizeof(mp_digit); |
++ if (count) { |
++ for (d = 0; count-- > 0; --len) { |
++ d = (d << 8) | *str++; |
++ } |
++ MP_DIGIT(mp, 0) = d; |
++ } |
++ |
++ /* Read the rest of the digits */ |
++ for(; len > 0; len -= sizeof(mp_digit)) { |
++ for (d = 0, count = sizeof(mp_digit); count > 0; --count) { |
++ d = (d << 8) | *str++; |
++ } |
++ if (MP_EQ == mp_cmp_z(mp)) { |
++ if (!d) |
++ continue; |
++ } else { |
++ if((res = s_mp_lshd(mp, 1)) != MP_OKAY) |
++ return res; |
++ } |
++ MP_DIGIT(mp, 0) = d; |
++ } |
++ return MP_OKAY; |
++} /* end mp_read_unsigned_octets() */ |
++/* }}} */ |
++ |
++/* {{{ mp_unsigned_octet_size(mp) */ |
++int |
++mp_unsigned_octet_size(const mp_int *mp) |
++{ |
++ int bytes; |
++ int ix; |
++ mp_digit d = 0; |
++ |
++ ARGCHK(mp != NULL, MP_BADARG); |
++ ARGCHK(MP_ZPOS == SIGN(mp), MP_BADARG); |
++ |
++ bytes = (USED(mp) * sizeof(mp_digit)); |
++ |
++ /* subtract leading zeros. */ |
++ /* Iterate over each digit... */ |
++ for(ix = USED(mp) - 1; ix >= 0; ix--) { |
++ d = DIGIT(mp, ix); |
++ if (d) |
++ break; |
++ bytes -= sizeof(d); |
++ } |
++ if (!bytes) |
++ return 1; |
++ |
++ /* Have MSD, check digit bytes, high order first */ |
++ for(ix = sizeof(mp_digit) - 1; ix >= 0; ix--) { |
++ unsigned char x = (unsigned char)(d >> (ix * CHAR_BIT)); |
++ if (x) |
++ break; |
++ --bytes; |
++ } |
++ return bytes; |
++} /* end mp_unsigned_octet_size() */ |
++/* }}} */ |
++ |
++/* {{{ mp_to_unsigned_octets(mp, str) */ |
++/* output a buffer of big endian octets no longer than specified. */ |
++mp_err |
++mp_to_unsigned_octets(const mp_int *mp, unsigned char *str, mp_size maxlen) |
++{ |
++ int ix, pos = 0; |
++ int bytes; |
++ |
++ ARGCHK(mp != NULL && str != NULL && !SIGN(mp), MP_BADARG); |
++ |
++ bytes = mp_unsigned_octet_size(mp); |
++ ARGCHK(bytes <= maxlen, MP_BADARG); |
++ |
++ /* Iterate over each digit... */ |
++ for(ix = USED(mp) - 1; ix >= 0; ix--) { |
++ mp_digit d = DIGIT(mp, ix); |
++ int jx; |
++ |
++ /* Unpack digit bytes, high order first */ |
++ for(jx = sizeof(mp_digit) - 1; jx >= 0; jx--) { |
++ unsigned char x = (unsigned char)(d >> (jx * CHAR_BIT)); |
++ if (!pos && !x) /* suppress leading zeros */ |
++ continue; |
++ str[pos++] = x; |
++ } |
++ } |
++ if (!pos) |
++ str[pos++] = 0; |
++ return pos; |
++} /* end mp_to_unsigned_octets() */ |
++/* }}} */ |
++ |
++/* {{{ mp_to_signed_octets(mp, str) */ |
++/* output a buffer of big endian octets no longer than specified. */ |
++mp_err |
++mp_to_signed_octets(const mp_int *mp, unsigned char *str, mp_size maxlen) |
++{ |
++ int ix, pos = 0; |
++ int bytes; |
++ |
++ ARGCHK(mp != NULL && str != NULL && !SIGN(mp), MP_BADARG); |
++ |
++ bytes = mp_unsigned_octet_size(mp); |
++ ARGCHK(bytes <= maxlen, MP_BADARG); |
++ |
++ /* Iterate over each digit... */ |
++ for(ix = USED(mp) - 1; ix >= 0; ix--) { |
++ mp_digit d = DIGIT(mp, ix); |
++ int jx; |
++ |
++ /* Unpack digit bytes, high order first */ |
++ for(jx = sizeof(mp_digit) - 1; jx >= 0; jx--) { |
++ unsigned char x = (unsigned char)(d >> (jx * CHAR_BIT)); |
++ if (!pos) { |
++ if (!x) /* suppress leading zeros */ |
++ continue; |
++ if (x & 0x80) { /* add one leading zero to make output positive. */ |
++ ARGCHK(bytes + 1 <= maxlen, MP_BADARG); |
++ if (bytes + 1 > maxlen) |
++ return MP_BADARG; |
++ str[pos++] = 0; |
++ } |
++ } |
++ str[pos++] = x; |
++ } |
++ } |
++ if (!pos) |
++ str[pos++] = 0; |
++ return pos; |
++} /* end mp_to_signed_octets() */ |
++/* }}} */ |
++ |
++/* {{{ mp_to_fixlen_octets(mp, str) */ |
++/* output a buffer of big endian octets exactly as long as requested. */ |
++mp_err |
++mp_to_fixlen_octets(const mp_int *mp, unsigned char *str, mp_size length) |
++{ |
++ int ix, pos = 0; |
++ int bytes; |
++ |
++ ARGCHK(mp != NULL && str != NULL && !SIGN(mp), MP_BADARG); |
++ |
++ bytes = mp_unsigned_octet_size(mp); |
++ ARGCHK(bytes <= length, MP_BADARG); |
++ |
++ /* place any needed leading zeros */ |
++ for (;length > bytes; --length) { |
++ *str++ = 0; |
++ } |
++ |
++ /* Iterate over each digit... */ |
++ for(ix = USED(mp) - 1; ix >= 0; ix--) { |
++ mp_digit d = DIGIT(mp, ix); |
++ int jx; |
++ |
++ /* Unpack digit bytes, high order first */ |
++ for(jx = sizeof(mp_digit) - 1; jx >= 0; jx--) { |
++ unsigned char x = (unsigned char)(d >> (jx * CHAR_BIT)); |
++ if (!pos && !x) /* suppress leading zeros */ |
++ continue; |
++ str[pos++] = x; |
++ } |
++ } |
++ if (!pos) |
++ str[pos++] = 0; |
++ return MP_OKAY; |
++} /* end mp_to_fixlen_octets() */ |
++/* }}} */ |
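++
++/*
++  Illustrative round trip through the octet routines above (hypothetical
++  caller; buffer contents and sizes are illustrative only):
++
++    unsigned char in[32], out[32];
++    mp_int v;
++
++    if (mp_init(&v) == MP_OKAY &&
++        mp_read_unsigned_octets(&v, in, sizeof in) == MP_OKAY)
++      mp_to_fixlen_octets(&v, out, sizeof out);
++    mp_clear(&v);
++
++  mp_read_unsigned_octets() treats 'in' as an unsigned big-endian value,
++  and mp_to_fixlen_octets() left-pads with zero octets so 'out' is always
++  filled to its full length.
++ */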
++ |
++ |
++/*------------------------------------------------------------------------*/ |
++/* HERE THERE BE DRAGONS */ |
++ |
+diff --git a/net/third_party/nss/ssl/mpi/mpi.h b/net/third_party/nss/ssl/mpi/mpi.h |
+new file mode 100644 |
+index 0000000..79503f3 |
+--- /dev/null |
++++ b/net/third_party/nss/ssl/mpi/mpi.h |
+@@ -0,0 +1,340 @@ |
++/* |
++ * mpi.h |
++ * |
++ * Arbitrary precision integer arithmetic library |
++ * |
++ * ***** BEGIN LICENSE BLOCK ***** |
++ * Version: MPL 1.1/GPL 2.0/LGPL 2.1 |
++ * |
++ * The contents of this file are subject to the Mozilla Public License Version |
++ * 1.1 (the "License"); you may not use this file except in compliance with |
++ * the License. You may obtain a copy of the License at |
++ * http://www.mozilla.org/MPL/ |
++ * |
++ * Software distributed under the License is distributed on an "AS IS" basis, |
++ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License |
++ * for the specific language governing rights and limitations under the |
++ * License. |
++ * |
++ * The Original Code is the MPI Arbitrary Precision Integer Arithmetic library. |
++ * |
++ * The Initial Developer of the Original Code is |
++ * Michael J. Fromberger. |
++ * Portions created by the Initial Developer are Copyright (C) 1998 |
++ * the Initial Developer. All Rights Reserved. |
++ * |
++ * Contributor(s): |
++ * Netscape Communications Corporation |
++ * |
++ * Alternatively, the contents of this file may be used under the terms of |
++ * either the GNU General Public License Version 2 or later (the "GPL"), or |
++ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), |
++ * in which case the provisions of the GPL or the LGPL are applicable instead |
++ * of those above. If you wish to allow use of your version of this file only |
++ * under the terms of either the GPL or the LGPL, and not to allow others to |
++ * use your version of this file under the terms of the MPL, indicate your |
++ * decision by deleting the provisions above and replace them with the notice |
++ * and other provisions required by the GPL or the LGPL. If you do not delete |
++ * the provisions above, a recipient may use your version of this file under |
++ * the terms of any one of the MPL, the GPL or the LGPL. |
++ * |
++ * ***** END LICENSE BLOCK ***** */ |
++/* $Id: mpi.h,v 1.23 2008/12/04 18:16:34 rrelyea%redhat.com Exp $ */ |
++ |
++#ifndef _H_MPI_ |
++#define _H_MPI_ |
++ |
++#include "mpi-config.h" |
++ |
++#if MP_DEBUG |
++#undef MP_IOFUNC |
++#define MP_IOFUNC 1 |
++#endif |
++ |
++#if MP_IOFUNC |
++#include <stdio.h> |
++#include <ctype.h> |
++#endif |
++ |
++#include <limits.h> |
++ |
++#if defined(BSDI) |
++#undef ULLONG_MAX |
++#endif |
++ |
++#if defined( macintosh ) |
++#include <Types.h> |
++#elif defined( _WIN32_WCE) |
++/* #include <sys/types.h> What do we need here ?? */ |
++#else |
++#include <sys/types.h> |
++#endif |
++ |
++#define MP_NEG 1 |
++#define MP_ZPOS 0 |
++ |
++#define MP_OKAY 0 /* no error, all is well */ |
++#define MP_YES 0 /* yes (boolean result) */ |
++#define MP_NO -1 /* no (boolean result) */ |
++#define MP_MEM -2 /* out of memory */ |
++#define MP_RANGE -3 /* argument out of range */ |
++#define MP_BADARG -4 /* invalid parameter */ |
++#define MP_UNDEF -5 /* answer is undefined */ |
++#define MP_LAST_CODE MP_UNDEF |
++ |
++typedef unsigned int mp_sign; |
++typedef unsigned int mp_size; |
++typedef int mp_err; |
++ |
++#define MP_32BIT_MAX 4294967295U |
++ |
++#if !defined(ULONG_MAX) |
++#error "ULONG_MAX not defined" |
++#elif !defined(UINT_MAX) |
++#error "UINT_MAX not defined" |
++#elif !defined(USHRT_MAX) |
++#error "USHRT_MAX not defined" |
++#endif |
++ |
++#if defined(ULONG_LONG_MAX) /* GCC, HPUX */ |
++#define MP_ULONG_LONG_MAX ULONG_LONG_MAX |
++#elif defined(ULLONG_MAX) /* Solaris */ |
++#define MP_ULONG_LONG_MAX ULLONG_MAX |
++/* MP_ULONG_LONG_MAX was defined to be ULLONG_MAX */ |
++#elif defined(ULONGLONG_MAX) /* IRIX, AIX */ |
++#define MP_ULONG_LONG_MAX ULONGLONG_MAX |
++#endif |
++ |
++/* We only use unsigned long for mp_digit iff long is more than 32 bits. */ |
++#if !defined(MP_USE_UINT_DIGIT) && ULONG_MAX > MP_32BIT_MAX |
++typedef unsigned long mp_digit; |
++#define MP_DIGIT_MAX ULONG_MAX |
++#define MP_DIGIT_FMT "%016lX" /* printf() format for 1 digit */ |
++#define MP_HALF_DIGIT_MAX UINT_MAX |
++#undef MP_NO_MP_WORD |
++#define MP_NO_MP_WORD 1 |
++#undef MP_USE_LONG_DIGIT |
++#define MP_USE_LONG_DIGIT 1 |
++#undef MP_USE_LONG_LONG_DIGIT |
++ |
++#elif !defined(MP_USE_UINT_DIGIT) && defined(MP_ULONG_LONG_MAX) |
++typedef unsigned long long mp_digit; |
++#define MP_DIGIT_MAX MP_ULONG_LONG_MAX |
++#define MP_DIGIT_FMT "%016llX" /* printf() format for 1 digit */ |
++#define MP_HALF_DIGIT_MAX UINT_MAX |
++#undef MP_NO_MP_WORD |
++#define MP_NO_MP_WORD 1 |
++#undef MP_USE_LONG_LONG_DIGIT |
++#define MP_USE_LONG_LONG_DIGIT 1 |
++#undef MP_USE_LONG_DIGIT |
++ |
++#else |
++typedef unsigned int mp_digit; |
++#define MP_DIGIT_MAX UINT_MAX |
++#define MP_DIGIT_FMT "%08X" /* printf() format for 1 digit */ |
++#define MP_HALF_DIGIT_MAX USHRT_MAX |
++#undef MP_USE_UINT_DIGIT |
++#define MP_USE_UINT_DIGIT 1 |
++#undef MP_USE_LONG_LONG_DIGIT |
++#undef MP_USE_LONG_DIGIT |
++#endif |
++ |
++#if !defined(MP_NO_MP_WORD) |
++#if defined(MP_USE_UINT_DIGIT) && \ |
++ (defined(MP_ULONG_LONG_MAX) || (ULONG_MAX > UINT_MAX)) |
++ |
++#if (ULONG_MAX > UINT_MAX) |
++typedef unsigned long mp_word; |
++typedef long mp_sword; |
++#define MP_WORD_MAX ULONG_MAX |
++ |
++#else |
++typedef unsigned long long mp_word; |
++typedef long long mp_sword; |
++#define MP_WORD_MAX MP_ULONG_LONG_MAX |
++#endif |
++ |
++#else |
++#define MP_NO_MP_WORD 1 |
++#endif |
++#endif /* !defined(MP_NO_MP_WORD) */ |
++ |
++#if !defined(MP_WORD_MAX) && defined(MP_DEFINE_SMALL_WORD) |
++typedef unsigned int mp_word; |
++typedef int mp_sword; |
++#define MP_WORD_MAX UINT_MAX |
++#endif |
++ |
++#define MP_DIGIT_BIT (CHAR_BIT*sizeof(mp_digit)) |
++#define MP_WORD_BIT (CHAR_BIT*sizeof(mp_word)) |
++#define MP_RADIX (1+(mp_word)MP_DIGIT_MAX) |
++ |
++#define MP_HALF_DIGIT_BIT (MP_DIGIT_BIT/2) |
++#define MP_HALF_RADIX (1+(mp_digit)MP_HALF_DIGIT_MAX) |
++/* MP_HALF_RADIX really ought to be called MP_SQRT_RADIX, but it's named |
++** MP_HALF_RADIX because it's the radix for MP_HALF_DIGITs, and it's |
++** consistent with the other _HALF_ names. |
++*/ |
++ |
++ |
++/* Macros for accessing the mp_int internals */ |
++#define MP_SIGN(MP) ((MP)->sign) |
++#define MP_USED(MP) ((MP)->used) |
++#define MP_ALLOC(MP) ((MP)->alloc) |
++#define MP_DIGITS(MP) ((MP)->dp) |
++#define MP_DIGIT(MP,N) (MP)->dp[(N)] |
++ |
++/* This defines the maximum I/O base (minimum is 2) */ |
++#define MP_MAX_RADIX 64 |
++ |
++typedef struct { |
++ mp_sign sign; /* sign of this quantity */ |
++ mp_size alloc; /* how many digits allocated */ |
++ mp_size used; /* how many digits used */ |
++ mp_digit *dp; /* the digits themselves */ |
++} mp_int; |
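++
++/*
++  Typical call pattern for the API declared below (values are
++  illustrative only; error checking omitted):
++
++    mp_int a, b, c;
++    char   buf[64];
++
++    mp_init(&a); mp_init(&b); mp_init(&c);
++    mp_read_radix(&a, "1234567890abcdef", 16);
++    mp_set(&b, 42);
++    mp_mul(&a, &b, &c);
++    mp_todecimal(&c, buf);
++    mp_clear(&a); mp_clear(&b); mp_clear(&c);
++
++  The output buffer must hold at least mp_radix_size(&c, 10) characters.
++ */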
++ |
++/* Default precision */ |
++mp_size mp_get_prec(void); |
++void mp_set_prec(mp_size prec); |
++ |
++/* Memory management */ |
++mp_err mp_init(mp_int *mp); |
++mp_err mp_init_size(mp_int *mp, mp_size prec); |
++mp_err mp_init_copy(mp_int *mp, const mp_int *from); |
++mp_err mp_copy(const mp_int *from, mp_int *to); |
++void mp_exch(mp_int *mp1, mp_int *mp2); |
++void mp_clear(mp_int *mp); |
++void mp_zero(mp_int *mp); |
++void mp_set(mp_int *mp, mp_digit d); |
++mp_err mp_set_int(mp_int *mp, long z); |
++#define mp_set_long(mp,z) mp_set_int(mp,z) |
++mp_err mp_set_ulong(mp_int *mp, unsigned long z); |
++ |
++/* Single digit arithmetic */ |
++mp_err mp_add_d(const mp_int *a, mp_digit d, mp_int *b); |
++mp_err mp_sub_d(const mp_int *a, mp_digit d, mp_int *b); |
++mp_err mp_mul_d(const mp_int *a, mp_digit d, mp_int *b); |
++mp_err mp_mul_2(const mp_int *a, mp_int *c); |
++mp_err mp_div_d(const mp_int *a, mp_digit d, mp_int *q, mp_digit *r); |
++mp_err mp_div_2(const mp_int *a, mp_int *c); |
++mp_err mp_expt_d(const mp_int *a, mp_digit d, mp_int *c); |
++ |
++/* Sign manipulations */ |
++mp_err mp_abs(const mp_int *a, mp_int *b); |
++mp_err mp_neg(const mp_int *a, mp_int *b); |
++ |
++/* Full arithmetic */ |
++mp_err mp_add(const mp_int *a, const mp_int *b, mp_int *c); |
++mp_err mp_sub(const mp_int *a, const mp_int *b, mp_int *c); |
++mp_err mp_mul(const mp_int *a, const mp_int *b, mp_int *c); |
++#if MP_SQUARE |
++mp_err mp_sqr(const mp_int *a, mp_int *b); |
++#else |
++#define mp_sqr(a, b) mp_mul(a, a, b) |
++#endif |
++mp_err mp_div(const mp_int *a, const mp_int *b, mp_int *q, mp_int *r); |
++mp_err mp_div_2d(const mp_int *a, mp_digit d, mp_int *q, mp_int *r); |
++mp_err mp_expt(mp_int *a, mp_int *b, mp_int *c); |
++mp_err mp_2expt(mp_int *a, mp_digit k); |
++mp_err mp_sqrt(const mp_int *a, mp_int *b); |
++ |
++/* Modular arithmetic */ |
++#if MP_MODARITH |
++mp_err mp_mod(const mp_int *a, const mp_int *m, mp_int *c); |
++mp_err mp_mod_d(const mp_int *a, mp_digit d, mp_digit *c); |
++mp_err mp_addmod(const mp_int *a, const mp_int *b, const mp_int *m, mp_int *c); |
++mp_err mp_submod(const mp_int *a, const mp_int *b, const mp_int *m, mp_int *c); |
++mp_err mp_mulmod(const mp_int *a, const mp_int *b, const mp_int *m, mp_int *c); |
++#if MP_SQUARE |
++mp_err mp_sqrmod(const mp_int *a, const mp_int *m, mp_int *c); |
++#else |
++#define mp_sqrmod(a, m, c) mp_mulmod(a, a, m, c) |
++#endif |
++mp_err mp_exptmod(const mp_int *a, const mp_int *b, const mp_int *m, mp_int *c); |
++mp_err mp_exptmod_d(const mp_int *a, mp_digit d, const mp_int *m, mp_int *c); |
++#endif /* MP_MODARITH */ |
++ |
++/* Comparisons */ |
++int mp_cmp_z(const mp_int *a); |
++int mp_cmp_d(const mp_int *a, mp_digit d); |
++int mp_cmp(const mp_int *a, const mp_int *b); |
++int mp_cmp_mag(mp_int *a, mp_int *b); |
++int mp_cmp_int(const mp_int *a, long z); |
++int mp_isodd(const mp_int *a); |
++int mp_iseven(const mp_int *a); |
++ |
++/* Number theoretic */ |
++#if MP_NUMTH |
++mp_err mp_gcd(mp_int *a, mp_int *b, mp_int *c); |
++mp_err mp_lcm(mp_int *a, mp_int *b, mp_int *c); |
++mp_err mp_xgcd(const mp_int *a, const mp_int *b, mp_int *g, mp_int *x, mp_int *y); |
++mp_err mp_invmod(const mp_int *a, const mp_int *m, mp_int *c); |
++mp_err mp_invmod_xgcd(const mp_int *a, const mp_int *m, mp_int *c); |
++#endif /* end MP_NUMTH */ |
++ |
++/* Input and output */ |
++#if MP_IOFUNC |
++void mp_print(mp_int *mp, FILE *ofp); |
++#endif /* end MP_IOFUNC */ |
++ |
++/* Base conversion */ |
++mp_err mp_read_raw(mp_int *mp, char *str, int len); |
++int mp_raw_size(mp_int *mp); |
++mp_err mp_toraw(mp_int *mp, char *str); |
++mp_err mp_read_radix(mp_int *mp, const char *str, int radix); |
++mp_err mp_read_variable_radix(mp_int *a, const char * str, int default_radix); |
++int mp_radix_size(mp_int *mp, int radix); |
++mp_err mp_toradix(mp_int *mp, char *str, int radix); |
++int mp_tovalue(char ch, int r); |
++ |
++#define mp_tobinary(M, S) mp_toradix((M), (S), 2) |
++#define mp_tooctal(M, S) mp_toradix((M), (S), 8) |
++#define mp_todecimal(M, S) mp_toradix((M), (S), 10) |
++#define mp_tohex(M, S) mp_toradix((M), (S), 16) |
++ |
++/* Error strings */ |
++const char *mp_strerror(mp_err ec); |
++ |
++/* Octet string conversion functions */ |
++mp_err mp_read_unsigned_octets(mp_int *mp, const unsigned char *str, mp_size len); |
++int mp_unsigned_octet_size(const mp_int *mp); |
++mp_err mp_to_unsigned_octets(const mp_int *mp, unsigned char *str, mp_size maxlen); |
++mp_err mp_to_signed_octets(const mp_int *mp, unsigned char *str, mp_size maxlen); |
++mp_err mp_to_fixlen_octets(const mp_int *mp, unsigned char *str, mp_size len); |
++ |
++/* Miscellaneous */ |
++mp_size mp_trailing_zeros(const mp_int *mp); |
++void freebl_cpuid(unsigned long op, unsigned long *eax, |
++ unsigned long *ebx, unsigned long *ecx, |
++ unsigned long *edx); |
++ |
++ |
++#define MP_CHECKOK(x) if (MP_OKAY > (res = (x))) goto CLEANUP |
++#define MP_CHECKERR(x) if (MP_OKAY > (res = (x))) goto CLEANUP |
++ |
++#if defined(MP_API_COMPATIBLE) |
++#define NEG MP_NEG |
++#define ZPOS MP_ZPOS |
++#define DIGIT_MAX MP_DIGIT_MAX |
++#define DIGIT_BIT MP_DIGIT_BIT |
++#define DIGIT_FMT MP_DIGIT_FMT |
++#define RADIX MP_RADIX |
++#define MAX_RADIX MP_MAX_RADIX |
++#define SIGN(MP) MP_SIGN(MP) |
++#define USED(MP) MP_USED(MP) |
++#define ALLOC(MP) MP_ALLOC(MP) |
++#define DIGITS(MP) MP_DIGITS(MP) |
++#define DIGIT(MP,N) MP_DIGIT(MP,N) |
++ |
++#if MP_ARGCHK == 1 |
++#define ARGCHK(X,Y) {if(!(X)){return (Y);}} |
++#elif MP_ARGCHK == 2 |
++#include <assert.h> |
++#define ARGCHK(X,Y) assert(X) |
++#else |
++#define ARGCHK(X,Y) /* */ |
++#endif |
++#endif /* defined MP_API_COMPATIBLE */ |
++ |
++#endif /* end _H_MPI_ */ |
+diff --git a/net/third_party/nss/ssl/mpi/mplogic.c b/net/third_party/nss/ssl/mpi/mplogic.c |
+new file mode 100644 |
+index 0000000..216f07a |
+--- /dev/null |
++++ b/net/third_party/nss/ssl/mpi/mplogic.c |
+@@ -0,0 +1,466 @@ |
++/* |
++ * mplogic.c |
++ * |
++ * Bitwise logical operations on MPI values |
++ * |
++ * ***** BEGIN LICENSE BLOCK ***** |
++ * Version: MPL 1.1/GPL 2.0/LGPL 2.1 |
++ * |
++ * The contents of this file are subject to the Mozilla Public License Version |
++ * 1.1 (the "License"); you may not use this file except in compliance with |
++ * the License. You may obtain a copy of the License at |
++ * http://www.mozilla.org/MPL/ |
++ * |
++ * Software distributed under the License is distributed on an "AS IS" basis, |
++ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License |
++ * for the specific language governing rights and limitations under the |
++ * License. |
++ * |
++ * The Original Code is the MPI Arbitrary Precision Integer Arithmetic library. |
++ * |
++ * The Initial Developer of the Original Code is |
++ * Michael J. Fromberger. |
++ * Portions created by the Initial Developer are Copyright (C) 1998 |
++ * the Initial Developer. All Rights Reserved. |
++ * |
++ * Contributor(s): |
++ * |
++ * Alternatively, the contents of this file may be used under the terms of |
++ * either the GNU General Public License Version 2 or later (the "GPL"), or |
++ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), |
++ * in which case the provisions of the GPL or the LGPL are applicable instead |
++ * of those above. If you wish to allow use of your version of this file only |
++ * under the terms of either the GPL or the LGPL, and not to allow others to |
++ * use your version of this file under the terms of the MPL, indicate your |
++ * decision by deleting the provisions above and replace them with the notice |
++ * and other provisions required by the GPL or the LGPL. If you do not delete |
++ * the provisions above, a recipient may use your version of this file under |
++ * the terms of any one of the MPL, the GPL or the LGPL. |
++ * |
++ * ***** END LICENSE BLOCK ***** */ |
++/* $Id: mplogic.c,v 1.15 2004/04/27 23:04:36 gerv%gerv.net Exp $ */ |
++ |
++#define MP_API_COMPATIBLE 1 |
++#include "mpi-priv.h" |
++#include "mplogic.h" |
++ |
++/* {{{ Lookup table for population count */ |
++ |
++static unsigned char bitc[] = { |
++ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, |
++ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, |
++ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, |
++ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, |
++ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, |
++ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, |
++ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, |
++ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, |
++ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, |
++ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, |
++ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, |
++ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, |
++ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, |
++ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, |
++ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, |
++ 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 |
++}; |
++ |
++/* }}} */ |
++ |
++/*------------------------------------------------------------------------*/ |
++/* |
++ mpl_not(a, b) - compute b = ~a |
++ mpl_and(a, b, c) - compute c = a & b |
++ mpl_or(a, b, c) - compute c = a | b |
++ mpl_xor(a, b, c) - compute c = a ^ b |
++ */ |
++ |
++/* {{{ mpl_not(a, b) */ |
++ |
++mp_err mpl_not(mp_int *a, mp_int *b) |
++{ |
++ mp_err res; |
++ unsigned int ix; |
++ |
++ ARGCHK(a != NULL && b != NULL, MP_BADARG); |
++ |
++ if((res = mp_copy(a, b)) != MP_OKAY) |
++ return res; |
++ |
++ /* This relies on the fact that the digit type is unsigned */ |
++ for(ix = 0; ix < USED(b); ix++) |
++ DIGIT(b, ix) = ~DIGIT(b, ix); |
++ |
++ s_mp_clamp(b); |
++ |
++ return MP_OKAY; |
++ |
++} /* end mpl_not() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mpl_and(a, b, c) */ |
++ |
++mp_err mpl_and(mp_int *a, mp_int *b, mp_int *c) |
++{ |
++ mp_int *which, *other; |
++ mp_err res; |
++ unsigned int ix; |
++ |
++ ARGCHK(a != NULL && b != NULL && c != NULL, MP_BADARG); |
++ |
++ if(USED(a) <= USED(b)) { |
++ which = a; |
++ other = b; |
++ } else { |
++ which = b; |
++ other = a; |
++ } |
++ |
++ if((res = mp_copy(which, c)) != MP_OKAY) |
++ return res; |
++ |
++ for(ix = 0; ix < USED(which); ix++) |
++ DIGIT(c, ix) &= DIGIT(other, ix); |
++ |
++ s_mp_clamp(c); |
++ |
++ return MP_OKAY; |
++ |
++} /* end mpl_and() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mpl_or(a, b, c) */ |
++ |
++mp_err mpl_or(mp_int *a, mp_int *b, mp_int *c) |
++{ |
++ mp_int *which, *other; |
++ mp_err res; |
++ unsigned int ix; |
++ |
++ ARGCHK(a != NULL && b != NULL && c != NULL, MP_BADARG); |
++ |
++ if(USED(a) >= USED(b)) { |
++ which = a; |
++ other = b; |
++ } else { |
++ which = b; |
++ other = a; |
++ } |
++ |
++ if((res = mp_copy(which, c)) != MP_OKAY) |
++ return res; |
++ |
++ for(ix = 0; ix < USED(which); ix++) |
++ DIGIT(c, ix) |= DIGIT(other, ix); |
++ |
++ return MP_OKAY; |
++ |
++} /* end mpl_or() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mpl_xor(a, b, c) */ |
++ |
++mp_err mpl_xor(mp_int *a, mp_int *b, mp_int *c) |
++{ |
++ mp_int *which, *other; |
++ mp_err res; |
++ unsigned int ix; |
++ |
++ ARGCHK(a != NULL && b != NULL && c != NULL, MP_BADARG); |
++ |
++ if(USED(a) >= USED(b)) { |
++ which = a; |
++ other = b; |
++ } else { |
++ which = b; |
++ other = a; |
++ } |
++ |
++ if((res = mp_copy(which, c)) != MP_OKAY) |
++ return res; |
++ |
++ for(ix = 0; ix < USED(which); ix++) |
++ DIGIT(c, ix) ^= DIGIT(other, ix); |
++ |
++ s_mp_clamp(c); |
++ |
++ return MP_OKAY; |
++ |
++} /* end mpl_xor() */ |
++ |
++/* }}} */ |
++ |
++/*------------------------------------------------------------------------*/ |
++/* |
++ mpl_rsh(a, b, d) - b = a >> d |
++ mpl_lsh(a, b, d) - b = a << d |
++ */ |
++ |
++/* {{{ mpl_rsh(a, b, d) */ |
++ |
++mp_err mpl_rsh(const mp_int *a, mp_int *b, mp_digit d) |
++{ |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL && b != NULL, MP_BADARG); |
++ |
++ if((res = mp_copy(a, b)) != MP_OKAY) |
++ return res; |
++ |
++ s_mp_div_2d(b, d); |
++ |
++ return MP_OKAY; |
++ |
++} /* end mpl_rsh() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mpl_lsh(a, b, d) */ |
++ |
++mp_err mpl_lsh(const mp_int *a, mp_int *b, mp_digit d) |
++{ |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL && b != NULL, MP_BADARG); |
++ |
++ if((res = mp_copy(a, b)) != MP_OKAY) |
++ return res; |
++ |
++ return s_mp_mul_2d(b, d); |
++ |
++} /* end mpl_lsh() */ |
++ |
++/* }}} */ |
++ |
++/*------------------------------------------------------------------------*/ |
++/* |
++ mpl_num_set(a, num) |
++ |
++ Count the number of set bits in the binary representation of a. |
++ Returns MP_OKAY and sets 'num' to be the number of such bits, if |
++ possible. If num is NULL, the result is thrown away, but it is |
++ not considered an error. |
++ |
++ mpl_num_clear() does basically the same thing for clear bits. |
++ */ |
++ |
++/* {{{ mpl_num_set(a, num) */ |
++ |
++mp_err mpl_num_set(mp_int *a, int *num) |
++{ |
++ unsigned int ix; |
++ int db, nset = 0; |
++ mp_digit cur; |
++ unsigned char reg; |
++ |
++ ARGCHK(a != NULL, MP_BADARG); |
++ |
++ for(ix = 0; ix < USED(a); ix++) { |
++ cur = DIGIT(a, ix); |
++ |
++ for(db = 0; db < sizeof(mp_digit); db++) { |
++ reg = (unsigned char)(cur >> (CHAR_BIT * db)); |
++ |
++ nset += bitc[reg]; |
++ } |
++ } |
++ |
++ if(num) |
++ *num = nset; |
++ |
++ return MP_OKAY; |
++ |
++} /* end mpl_num_set() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mpl_num_clear(a, num) */ |
++ |
++mp_err mpl_num_clear(mp_int *a, int *num) |
++{ |
++ unsigned int ix; |
++ int db, nset = 0; |
++ mp_digit cur; |
++ unsigned char reg; |
++ |
++ ARGCHK(a != NULL, MP_BADARG); |
++ |
++ for(ix = 0; ix < USED(a); ix++) { |
++ cur = DIGIT(a, ix); |
++ |
++ for(db = 0; db < sizeof(mp_digit); db++) { |
++ reg = (unsigned char)(cur >> (CHAR_BIT * db)); |
++ |
++ nset += bitc[UCHAR_MAX - reg]; |
++ } |
++ } |
++ |
++ if(num) |
++ *num = nset; |
++ |
++ return MP_OKAY; |
++ |
++ |
++} /* end mpl_num_clear() */ |
++ |
++/* }}} */ |
++ |
++/*------------------------------------------------------------------------*/ |
++/* |
++ mpl_parity(a) |
++ |
++ Determines the bitwise parity of the value given. Returns MP_EVEN |
++ if an even number of digits are set, MP_ODD if an odd number are |
++ set. |
++ */ |
++ |
++/* {{{ mpl_parity(a) */ |
++ |
++mp_err mpl_parity(mp_int *a) |
++{ |
++ unsigned int ix; |
++ int par = 0; |
++ mp_digit cur; |
++ |
++ ARGCHK(a != NULL, MP_BADARG); |
++ |
++ for(ix = 0; ix < USED(a); ix++) { |
++ int shft = (sizeof(mp_digit) * CHAR_BIT) / 2; |
++ |
++ cur = DIGIT(a, ix); |
++ |
++ /* Compute parity for current digit */ |
++ while(shft != 0) { |
++ cur ^= (cur >> shft); |
++ shft >>= 1; |
++ } |
++ cur &= 1; |
++ |
++ /* XOR with running parity so far */ |
++ par ^= cur; |
++ } |
++ |
++ if(par) |
++ return MP_ODD; |
++ else |
++ return MP_EVEN; |
++ |
++} /* end mpl_parity() */ |
++ |
++/* }}} */ |
++ |
++/* |
++ mpl_set_bit |
++ |
++ Returns MP_OKAY or some error code. |
++ Grows a if needed to set a bit to 1. |
++ */ |
++mp_err mpl_set_bit(mp_int *a, mp_size bitNum, mp_size value) |
++{ |
++ mp_size ix; |
++ mp_err rv; |
++ mp_digit mask; |
++ |
++ ARGCHK(a != NULL, MP_BADARG); |
++ |
++ ix = bitNum / MP_DIGIT_BIT; |
++ if (ix + 1 > MP_USED(a)) { |
++ rv = s_mp_pad(a, ix + 1); |
++ if (rv != MP_OKAY) |
++ return rv; |
++ } |
++ |
++ bitNum = bitNum % MP_DIGIT_BIT; |
++ mask = (mp_digit)1 << bitNum; |
++ if (value) |
++ MP_DIGIT(a,ix) |= mask; |
++ else |
++ MP_DIGIT(a,ix) &= ~mask; |
++ s_mp_clamp(a); |
++ return MP_OKAY; |
++} |
++ |
++/* |
++ mpl_get_bit |
++ |
++ returns 0 or 1 or some (negative) error code. |
++ */ |
++mp_err mpl_get_bit(const mp_int *a, mp_size bitNum) |
++{ |
++ mp_size bit, ix; |
++ mp_err rv; |
++ |
++ ARGCHK(a != NULL, MP_BADARG); |
++ |
++ ix = bitNum / MP_DIGIT_BIT; |
++ ARGCHK(ix <= MP_USED(a) - 1, MP_RANGE); |
++ |
++ bit = bitNum % MP_DIGIT_BIT; |
++ rv = (mp_err)(MP_DIGIT(a, ix) >> bit) & 1; |
++ return rv; |
++} |
++ |
++/* |
++ mpl_get_bits |
++ - Extracts numBits bits from a, where the least significant extracted bit |
++    is bit lsbNum.  Returns a negative value if an error occurs.
++  - Because the sign bit is used to indicate an error, the maximum number of
++    bits to be returned is the lesser of (a) the number of bits in an mp_digit,
++    or (b) one less than the number of bits in an mp_err.
++  - lsbNum + numBits can be greater than the number of significant bits in
++ integer a, as long as bit lsbNum is in the high order digit of a. |
++ */ |
++mp_err mpl_get_bits(const mp_int *a, mp_size lsbNum, mp_size numBits) |
++{ |
++ mp_size rshift = (lsbNum % MP_DIGIT_BIT); |
++ mp_size lsWndx = (lsbNum / MP_DIGIT_BIT); |
++ mp_digit * digit = MP_DIGITS(a) + lsWndx; |
++ mp_digit mask = ((1 << numBits) - 1); |
++ |
++ ARGCHK(numBits < CHAR_BIT * sizeof mask, MP_BADARG); |
++ ARGCHK(MP_HOWMANY(lsbNum, MP_DIGIT_BIT) <= MP_USED(a), MP_RANGE); |
++ |
++ if ((numBits + lsbNum % MP_DIGIT_BIT <= MP_DIGIT_BIT) || |
++ (lsWndx + 1 >= MP_USED(a))) { |
++ mask &= (digit[0] >> rshift); |
++ } else { |
++ mask &= ((digit[0] >> rshift) | (digit[1] << (MP_DIGIT_BIT - rshift))); |
++ } |
++ return (mp_err)mask; |
++} |
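++
++/*
++ * A fixed-width toy mirror of the extraction above (illustrative only, not
++ * used by this library), assuming 32-bit digits and n < 32.  For example,
++ * with d[] = { 0xdeadbeef, 0x0000cafe }, lsb = 28 and n = 8, the window
++ * straddles the digit boundary and the result is 0xed.
++ */
++static unsigned int get_bits32(const unsigned int *d, unsigned int nUsed,
++                               unsigned int lsb, unsigned int n)
++{
++  unsigned int word  = lsb / 32;
++  unsigned int shift = lsb % 32;
++  unsigned int mask  = (1U << n) - 1;            /* n < 32, as above       */
++  unsigned int bits  = d[word] >> shift;
++
++  if (shift + n > 32 && word + 1 < nUsed)        /* window straddles two   */
++    bits |= d[word + 1] << (32 - shift);         /* digits: merge both     */
++  return bits & mask;
++}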
++ |
++/* |
++ mpl_significant_bits |
++   returns the number of significant bits in abs(a).
++   returns 1 if the value is zero.
++ */ |
++mp_err mpl_significant_bits(const mp_int *a) |
++{ |
++ mp_err bits = 0; |
++ int ix; |
++ |
++ ARGCHK(a != NULL, MP_BADARG); |
++ |
++ ix = MP_USED(a); |
++ for (ix = MP_USED(a); ix > 0; ) { |
++ mp_digit d; |
++ d = MP_DIGIT(a, --ix); |
++ if (d) { |
++ while (d) { |
++ ++bits; |
++ d >>= 1; |
++ } |
++ break; |
++ } |
++ } |
++ bits += ix * MP_DIGIT_BIT; |
++ if (!bits) |
++ bits = 1; |
++ return bits; |
++} |
++ |
++/*------------------------------------------------------------------------*/ |
++/* HERE THERE BE DRAGONS */ |
+diff --git a/net/third_party/nss/ssl/mpi/mplogic.h b/net/third_party/nss/ssl/mpi/mplogic.h |
+new file mode 100644 |
+index 0000000..de831dc |
+--- /dev/null |
++++ b/net/third_party/nss/ssl/mpi/mplogic.h |
+@@ -0,0 +1,85 @@ |
++/* |
++ * mplogic.h |
++ * |
++ * Bitwise logical operations on MPI values |
++ * |
++ * ***** BEGIN LICENSE BLOCK ***** |
++ * Version: MPL 1.1/GPL 2.0/LGPL 2.1 |
++ * |
++ * The contents of this file are subject to the Mozilla Public License Version |
++ * 1.1 (the "License"); you may not use this file except in compliance with |
++ * the License. You may obtain a copy of the License at |
++ * http://www.mozilla.org/MPL/ |
++ * |
++ * Software distributed under the License is distributed on an "AS IS" basis, |
++ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License |
++ * for the specific language governing rights and limitations under the |
++ * License. |
++ * |
++ * The Original Code is the MPI Arbitrary Precision Integer Arithmetic library. |
++ * |
++ * The Initial Developer of the Original Code is |
++ * Michael J. Fromberger. |
++ * Portions created by the Initial Developer are Copyright (C) 1998 |
++ * the Initial Developer. All Rights Reserved. |
++ * |
++ * Contributor(s): |
++ * |
++ * Alternatively, the contents of this file may be used under the terms of |
++ * either the GNU General Public License Version 2 or later (the "GPL"), or |
++ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), |
++ * in which case the provisions of the GPL or the LGPL are applicable instead |
++ * of those above. If you wish to allow use of your version of this file only |
++ * under the terms of either the GPL or the LGPL, and not to allow others to |
++ * use your version of this file under the terms of the MPL, indicate your |
++ * decision by deleting the provisions above and replace them with the notice |
++ * and other provisions required by the GPL or the LGPL. If you do not delete |
++ * the provisions above, a recipient may use your version of this file under |
++ * the terms of any one of the MPL, the GPL or the LGPL. |
++ * |
++ * ***** END LICENSE BLOCK ***** */ |
++/* $Id: mplogic.h,v 1.7 2004/04/27 23:04:36 gerv%gerv.net Exp $ */ |
++ |
++#ifndef _H_MPLOGIC_ |
++#define _H_MPLOGIC_ |
++ |
++#include "mpi.h" |
++ |
++/* |
++ The logical operations treat an mp_int as if it were a bit vector, |
++ without regard to its sign (an mp_int is represented in a signed |
++ magnitude format). Values are treated as if they had an infinite |
++ string of zeros left of the most-significant bit. |
++ */ |
++ |
++/* Parity results */ |
++ |
++#define MP_EVEN MP_YES |
++#define MP_ODD MP_NO |
++ |
++/* Bitwise functions */ |
++ |
++mp_err mpl_not(mp_int *a, mp_int *b); /* one's complement */ |
++mp_err mpl_and(mp_int *a, mp_int *b, mp_int *c); /* bitwise AND */ |
++mp_err mpl_or(mp_int *a, mp_int *b, mp_int *c); /* bitwise OR */ |
++mp_err mpl_xor(mp_int *a, mp_int *b, mp_int *c); /* bitwise XOR */ |
++ |
++/* Shift functions */ |
++ |
++mp_err mpl_rsh(const mp_int *a, mp_int *b, mp_digit d); /* right shift */ |
++mp_err mpl_lsh(const mp_int *a, mp_int *b, mp_digit d); /* left shift */ |
++ |
++/* Bit count and parity */ |
++ |
++mp_err mpl_num_set(mp_int *a, int *num); /* count set bits */ |
++mp_err mpl_num_clear(mp_int *a, int *num); /* count clear bits */ |
++mp_err mpl_parity(mp_int *a); /* determine parity */ |
++ |
++/* Get & Set the value of a bit */ |
++ |
++mp_err mpl_set_bit(mp_int *a, mp_size bitNum, mp_size value); |
++mp_err mpl_get_bit(const mp_int *a, mp_size bitNum); |
++mp_err mpl_get_bits(const mp_int *a, mp_size lsbNum, mp_size numBits); |
++mp_err mpl_significant_bits(const mp_int *a); |
++ |
++#endif /* end _H_MPLOGIC_ */ |
+diff --git a/net/third_party/nss/ssl/mpi/mpmontg.c b/net/third_party/nss/ssl/mpi/mpmontg.c |
+new file mode 100644 |
+index 0000000..088b7eb |
+--- /dev/null |
++++ b/net/third_party/nss/ssl/mpi/mpmontg.c |
+@@ -0,0 +1,1210 @@ |
++/* ***** BEGIN LICENSE BLOCK ***** |
++ * Version: MPL 1.1/GPL 2.0/LGPL 2.1 |
++ * |
++ * The contents of this file are subject to the Mozilla Public License Version |
++ * 1.1 (the "License"); you may not use this file except in compliance with |
++ * the License. You may obtain a copy of the License at |
++ * http://www.mozilla.org/MPL/ |
++ * |
++ * Software distributed under the License is distributed on an "AS IS" basis, |
++ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License |
++ * for the specific language governing rights and limitations under the |
++ * License. |
++ * |
++ * The Original Code is the Netscape security libraries. |
++ * |
++ * The Initial Developer of the Original Code is |
++ * Netscape Communications Corporation. |
++ * Portions created by the Initial Developer are Copyright (C) 2000 |
++ * the Initial Developer. All Rights Reserved. |
++ * |
++ * Contributor(s): |
++ * Sheueling Chang Shantz <sheueling.chang@sun.com>, |
++ * Stephen Fung <stephen.fung@sun.com>, and |
++ * Douglas Stebila <douglas@stebila.ca> of Sun Laboratories. |
++ * |
++ * Alternatively, the contents of this file may be used under the terms of |
++ * either the GNU General Public License Version 2 or later (the "GPL"), or |
++ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), |
++ * in which case the provisions of the GPL or the LGPL are applicable instead |
++ * of those above. If you wish to allow use of your version of this file only |
++ * under the terms of either the GPL or the LGPL, and not to allow others to |
++ * use your version of this file under the terms of the MPL, indicate your |
++ * decision by deleting the provisions above and replace them with the notice |
++ * and other provisions required by the GPL or the LGPL. If you do not delete |
++ * the provisions above, a recipient may use your version of this file under |
++ * the terms of any one of the MPL, the GPL or the LGPL. |
++ * |
++ * ***** END LICENSE BLOCK ***** */ |
++/* $Id: mpmontg.c,v 1.22 2010/05/02 22:36:41 nelson%bolyard.com Exp $ */ |
++ |
++/* This file implements modular exponentiation using Montgomery's
++ * method for modular reduction.  It implements the method
++ * described as "Improvement 1" in the paper "A Cryptographic Library for
++ * the Motorola DSP56000" by Stephen R. Dusse' and Burton S. Kaliski Jr.
++ * published in "Advances in Cryptology: Proceedings of EUROCRYPT '90" |
++ * "Lecture Notes in Computer Science" volume 473, 1991, pg 230-244, |
++ * published by Springer Verlag. |
++ */ |
++ |
++#define MP_API_COMPATIBLE 1 |
++#define MP_USING_CACHE_SAFE_MOD_EXP 1 |
++#include <string.h> |
++#include "mpi-priv.h" |
++#include "mplogic.h" |
++#include "mpprime.h" |
++#ifdef MP_USING_MONT_MULF |
++#include "montmulf.h" |
++#endif |
++#include <stddef.h> /* ptrdiff_t */ |
++ |
++/* if MP_CHAR_STORE_SLOW is defined, we */ |
++/* need to know endianness of this platform. */ |
++#ifdef MP_CHAR_STORE_SLOW |
++#if !defined(MP_IS_BIG_ENDIAN) && !defined(MP_IS_LITTLE_ENDIAN) |
++#error "You must define MP_IS_BIG_ENDIAN or MP_IS_LITTLE_ENDIAN\n" \ |
++ " if you define MP_CHAR_STORE_SLOW." |
++#endif |
++#endif |
++ |
++#define STATIC |
++ |
++#define MAX_ODD_INTS 32 /* 2 ** (WINDOW_BITS - 1) */ |
++ |
++#if defined(_WIN32_WCE) |
++#define ABORT res = MP_UNDEF; goto CLEANUP |
++#else |
++#define ABORT abort() |
++#endif |
++ |
++/* computes T = REDC(T), 2^b == R */ |
++mp_err s_mp_redc(mp_int *T, mp_mont_modulus *mmm) |
++{ |
++ mp_err res; |
++ mp_size i; |
++ |
++ i = MP_USED(T) + MP_USED(&mmm->N) + 2; |
++ MP_CHECKOK( s_mp_pad(T, i) ); |
++ for (i = 0; i < MP_USED(&mmm->N); ++i ) { |
++ mp_digit m_i = MP_DIGIT(T, i) * mmm->n0prime; |
++ /* T += N * m_i * (MP_RADIX ** i); */ |
++ MP_CHECKOK( s_mp_mul_d_add_offset(&mmm->N, m_i, T, i) ); |
++ } |
++ s_mp_clamp(T); |
++ |
++ /* T /= R */ |
++ s_mp_div_2d(T, mmm->b); |
++ |
++ if ((res = s_mp_cmp(T, &mmm->N)) >= 0) { |
++ /* T = T - N */ |
++ MP_CHECKOK( s_mp_sub(T, &mmm->N) ); |
++#ifdef DEBUG |
++ if ((res = mp_cmp(T, &mmm->N)) >= 0) { |
++ res = MP_UNDEF; |
++ goto CLEANUP; |
++ } |
++#endif |
++ } |
++ res = MP_OKAY; |
++CLEANUP: |
++ return res; |
++} |
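++
++/*
++ * A single-word sketch of the same reduction (illustrative only, not part of
++ * this library), assuming a 32-bit unsigned int, a 64-bit unsigned long long,
++ * R = 2^32, and an odd modulus N < 2^31 so the intermediate sum cannot
++ * overflow.  redc32(T) returns T * R^-1 mod N for any T < N * R.
++ */
++static unsigned int neg_inv32(unsigned int n0)   /* -n0^-1 mod 2^32, n0 odd */
++{
++  unsigned int x = n0;          /* n0 * x == 1 mod 2^3 for odd n0           */
++  x *= 2 - n0 * x;              /* ... mod 2^6                              */
++  x *= 2 - n0 * x;              /* ... mod 2^12                             */
++  x *= 2 - n0 * x;              /* ... mod 2^24                             */
++  x *= 2 - n0 * x;              /* ... mod 2^48, hence mod 2^32             */
++  return 0U - x;
++}
++
++static unsigned int redc32(unsigned long long T, unsigned int N,
++                           unsigned int n0prime)  /* n0prime = neg_inv32(N) */
++{
++  unsigned int m = (unsigned int)T * n0prime;             /* T * n0' mod R  */
++  unsigned long long t = (T + (unsigned long long)m * N) >> 32;   /* / R    */
++  return (unsigned int)(t >= N ? t - N : t);               /* one final sub */
++}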
++ |
++#if !defined(MP_ASSEMBLY_MUL_MONT) && !defined(MP_MONT_USE_MP_MUL) |
++mp_err s_mp_mul_mont(const mp_int *a, const mp_int *b, mp_int *c, |
++ mp_mont_modulus *mmm) |
++{ |
++ mp_digit *pb; |
++ mp_digit m_i; |
++ mp_err res; |
++ mp_size ib; |
++ mp_size useda, usedb; |
++ |
++ ARGCHK(a != NULL && b != NULL && c != NULL, MP_BADARG); |
++ |
++ if (MP_USED(a) < MP_USED(b)) { |
++ const mp_int *xch = b; /* switch a and b, to do fewer outer loops */ |
++ b = a; |
++ a = xch; |
++ } |
++ |
++ MP_USED(c) = 1; MP_DIGIT(c, 0) = 0; |
++ ib = MP_USED(a) + MP_MAX(MP_USED(b), MP_USED(&mmm->N)) + 2; |
++ if((res = s_mp_pad(c, ib)) != MP_OKAY) |
++ goto CLEANUP; |
++ |
++ useda = MP_USED(a); |
++ pb = MP_DIGITS(b); |
++ s_mpv_mul_d(MP_DIGITS(a), useda, *pb++, MP_DIGITS(c)); |
++ s_mp_setz(MP_DIGITS(c) + useda + 1, ib - (useda + 1)); |
++ m_i = MP_DIGIT(c, 0) * mmm->n0prime; |
++ s_mp_mul_d_add_offset(&mmm->N, m_i, c, 0); |
++ |
++ /* Outer loop: Digits of b */ |
++ usedb = MP_USED(b); |
++ for (ib = 1; ib < usedb; ib++) { |
++ mp_digit b_i = *pb++; |
++ |
++ /* Inner product: Digits of a */ |
++ if (b_i) |
++ s_mpv_mul_d_add_prop(MP_DIGITS(a), useda, b_i, MP_DIGITS(c) + ib); |
++ m_i = MP_DIGIT(c, ib) * mmm->n0prime; |
++ s_mp_mul_d_add_offset(&mmm->N, m_i, c, ib); |
++ } |
++ if (usedb < MP_USED(&mmm->N)) { |
++ for (usedb = MP_USED(&mmm->N); ib < usedb; ++ib ) { |
++ m_i = MP_DIGIT(c, ib) * mmm->n0prime; |
++ s_mp_mul_d_add_offset(&mmm->N, m_i, c, ib); |
++ } |
++ } |
++ s_mp_clamp(c); |
++ s_mp_div_2d(c, mmm->b); |
++ if (s_mp_cmp(c, &mmm->N) >= 0) { |
++ MP_CHECKOK( s_mp_sub(c, &mmm->N) ); |
++ } |
++ res = MP_OKAY; |
++ |
++CLEANUP: |
++ return res; |
++} |
++#endif |
++ |
++STATIC |
++mp_err s_mp_to_mont(const mp_int *x, mp_mont_modulus *mmm, mp_int *xMont) |
++{ |
++ mp_err res; |
++ |
++ /* xMont = x * R mod N where N is modulus */ |
++ MP_CHECKOK( mpl_lsh(x, xMont, mmm->b) ); /* xMont = x << b */ |
++ MP_CHECKOK( mp_div(xMont, &mmm->N, 0, xMont) ); /* mod N */ |
++CLEANUP: |
++ return res; |
++} |
++ |
++#ifdef MP_USING_MONT_MULF |
++ |
++/* the floating point multiply is already cache safe, |
++ * don't turn on cache safe unless we specifically |
++ * force it */ |
++#ifndef MP_FORCE_CACHE_SAFE |
++#undef MP_USING_CACHE_SAFE_MOD_EXP |
++#endif |
++ |
++unsigned int mp_using_mont_mulf = 1; |
++ |
++/* computes montgomery square of the integer in mResult */ |
++#define SQR \ |
++ conv_i32_to_d32_and_d16(dm1, d16Tmp, mResult, nLen); \ |
++ mont_mulf_noconv(mResult, dm1, d16Tmp, \ |
++ dTmp, dn, MP_DIGITS(modulus), nLen, dn0) |
++ |
++/* computes montgomery product of x and the integer in mResult */ |
++#define MUL(x) \ |
++ conv_i32_to_d32(dm1, mResult, nLen); \ |
++ mont_mulf_noconv(mResult, dm1, oddPowers[x], \ |
++ dTmp, dn, MP_DIGITS(modulus), nLen, dn0) |
++ |
++/* Do modular exponentiation using floating point multiply code. */ |
++mp_err mp_exptmod_f(const mp_int * montBase, |
++ const mp_int * exponent, |
++ const mp_int * modulus, |
++ mp_int * result, |
++ mp_mont_modulus *mmm, |
++ int nLen, |
++ mp_size bits_in_exponent, |
++ mp_size window_bits, |
++ mp_size odd_ints) |
++{ |
++ mp_digit *mResult; |
++ double *dBuf = 0, *dm1, *dn, *dSqr, *d16Tmp, *dTmp; |
++ double dn0; |
++ mp_size i; |
++ mp_err res; |
++ int expOff; |
++ int dSize = 0, oddPowSize, dTmpSize; |
++ mp_int accum1; |
++ double *oddPowers[MAX_ODD_INTS]; |
++ |
++ /* function for computing n0prime only works if n0 is odd */ |
++ |
++ MP_DIGITS(&accum1) = 0; |
++ |
++ for (i = 0; i < MAX_ODD_INTS; ++i) |
++ oddPowers[i] = 0; |
++ |
++ MP_CHECKOK( mp_init_size(&accum1, 3 * nLen + 2) ); |
++ |
++ mp_set(&accum1, 1); |
++ MP_CHECKOK( s_mp_to_mont(&accum1, mmm, &accum1) ); |
++ MP_CHECKOK( s_mp_pad(&accum1, nLen) ); |
++ |
++ oddPowSize = 2 * nLen + 1; |
++ dTmpSize = 2 * oddPowSize; |
++ dSize = sizeof(double) * (nLen * 4 + 1 + |
++ ((odd_ints + 1) * oddPowSize) + dTmpSize); |
++ dBuf = (double *)malloc(dSize); |
++ dm1 = dBuf; /* array of d32 */ |
++ dn = dBuf + nLen; /* array of d32 */ |
++ dSqr = dn + nLen; /* array of d32 */ |
++ d16Tmp = dSqr + nLen; /* array of d16 */ |
++ dTmp = d16Tmp + oddPowSize; |
++ |
++ for (i = 0; i < odd_ints; ++i) { |
++ oddPowers[i] = dTmp; |
++ dTmp += oddPowSize; |
++ } |
++ mResult = (mp_digit *)(dTmp + dTmpSize); /* size is nLen + 1 */ |
++ |
++ /* Make dn and dn0 */ |
++ conv_i32_to_d32(dn, MP_DIGITS(modulus), nLen); |
++ dn0 = (double)(mmm->n0prime & 0xffff); |
++ |
++ /* Make dSqr */ |
++ conv_i32_to_d32_and_d16(dm1, oddPowers[0], MP_DIGITS(montBase), nLen); |
++ mont_mulf_noconv(mResult, dm1, oddPowers[0], |
++ dTmp, dn, MP_DIGITS(modulus), nLen, dn0); |
++ conv_i32_to_d32(dSqr, mResult, nLen); |
++ |
++ for (i = 1; i < odd_ints; ++i) { |
++ mont_mulf_noconv(mResult, dSqr, oddPowers[i - 1], |
++ dTmp, dn, MP_DIGITS(modulus), nLen, dn0); |
++ conv_i32_to_d16(oddPowers[i], mResult, nLen); |
++ } |
++ |
++ s_mp_copy(MP_DIGITS(&accum1), mResult, nLen); /* from, to, len */ |
++ |
++ for (expOff = bits_in_exponent - window_bits; expOff >= 0; expOff -= window_bits) { |
++ mp_size smallExp; |
++ MP_CHECKOK( mpl_get_bits(exponent, expOff, window_bits) ); |
++ smallExp = (mp_size)res; |
++ |
++ if (window_bits == 1) { |
++ if (!smallExp) { |
++ SQR; |
++ } else if (smallExp & 1) { |
++ SQR; MUL(0); |
++ } else { |
++ ABORT; |
++ } |
++ } else if (window_bits == 4) { |
++ if (!smallExp) { |
++ SQR; SQR; SQR; SQR; |
++ } else if (smallExp & 1) { |
++ SQR; SQR; SQR; SQR; MUL(smallExp/2); |
++ } else if (smallExp & 2) { |
++ SQR; SQR; SQR; MUL(smallExp/4); SQR; |
++ } else if (smallExp & 4) { |
++ SQR; SQR; MUL(smallExp/8); SQR; SQR; |
++ } else if (smallExp & 8) { |
++ SQR; MUL(smallExp/16); SQR; SQR; SQR; |
++ } else { |
++ ABORT; |
++ } |
++ } else if (window_bits == 5) { |
++ if (!smallExp) { |
++ SQR; SQR; SQR; SQR; SQR; |
++ } else if (smallExp & 1) { |
++ SQR; SQR; SQR; SQR; SQR; MUL(smallExp/2); |
++ } else if (smallExp & 2) { |
++ SQR; SQR; SQR; SQR; MUL(smallExp/4); SQR; |
++ } else if (smallExp & 4) { |
++ SQR; SQR; SQR; MUL(smallExp/8); SQR; SQR; |
++ } else if (smallExp & 8) { |
++ SQR; SQR; MUL(smallExp/16); SQR; SQR; SQR; |
++ } else if (smallExp & 0x10) { |
++ SQR; MUL(smallExp/32); SQR; SQR; SQR; SQR; |
++ } else { |
++ ABORT; |
++ } |
++ } else if (window_bits == 6) { |
++ if (!smallExp) { |
++ SQR; SQR; SQR; SQR; SQR; SQR; |
++ } else if (smallExp & 1) { |
++ SQR; SQR; SQR; SQR; SQR; SQR; MUL(smallExp/2); |
++ } else if (smallExp & 2) { |
++ SQR; SQR; SQR; SQR; SQR; MUL(smallExp/4); SQR; |
++ } else if (smallExp & 4) { |
++ SQR; SQR; SQR; SQR; MUL(smallExp/8); SQR; SQR; |
++ } else if (smallExp & 8) { |
++ SQR; SQR; SQR; MUL(smallExp/16); SQR; SQR; SQR; |
++ } else if (smallExp & 0x10) { |
++ SQR; SQR; MUL(smallExp/32); SQR; SQR; SQR; SQR; |
++ } else if (smallExp & 0x20) { |
++ SQR; MUL(smallExp/64); SQR; SQR; SQR; SQR; SQR; |
++ } else { |
++ ABORT; |
++ } |
++ } else { |
++ ABORT; |
++ } |
++ } |
++ |
++ s_mp_copy(mResult, MP_DIGITS(&accum1), nLen); /* from, to, len */ |
++ |
++ res = s_mp_redc(&accum1, mmm); |
++ mp_exch(&accum1, result); |
++ |
++CLEANUP: |
++ mp_clear(&accum1); |
++ if (dBuf) { |
++ if (dSize) |
++ memset(dBuf, 0, dSize); |
++ free(dBuf); |
++ } |
++ |
++ return res; |
++} |
++#undef SQR |
++#undef MUL |
++#endif |
++ |
++#define SQR(a,b) \ |
++ MP_CHECKOK( mp_sqr(a, b) );\ |
++ MP_CHECKOK( s_mp_redc(b, mmm) ) |
++ |
++#if defined(MP_MONT_USE_MP_MUL) |
++#define MUL(x,a,b) \ |
++ MP_CHECKOK( mp_mul(a, oddPowers + (x), b) ); \ |
++ MP_CHECKOK( s_mp_redc(b, mmm) ) |
++#else |
++#define MUL(x,a,b) \ |
++ MP_CHECKOK( s_mp_mul_mont(a, oddPowers + (x), b, mmm) ) |
++#endif |
++ |
++#define SWAPPA ptmp = pa1; pa1 = pa2; pa2 = ptmp |
++ |
++/* Do modular exponentiation using integer multiply code. */ |
++mp_err mp_exptmod_i(const mp_int * montBase, |
++ const mp_int * exponent, |
++ const mp_int * modulus, |
++ mp_int * result, |
++ mp_mont_modulus *mmm, |
++ int nLen, |
++ mp_size bits_in_exponent, |
++ mp_size window_bits, |
++ mp_size odd_ints) |
++{ |
++ mp_int *pa1, *pa2, *ptmp; |
++ mp_size i; |
++ mp_err res; |
++ int expOff; |
++ mp_int accum1, accum2, power2, oddPowers[MAX_ODD_INTS]; |
++ |
++ /* power2 = base ** 2; oddPowers[i] = base ** (2*i + 1); */ |
++ |
++ MP_DIGITS(&accum1) = 0; |
++ MP_DIGITS(&accum2) = 0; |
++ MP_DIGITS(&power2) = 0; |
++ for (i = 0; i < MAX_ODD_INTS; ++i) { |
++ MP_DIGITS(oddPowers + i) = 0; |
++ } |
++ |
++ MP_CHECKOK( mp_init_size(&accum1, 3 * nLen + 2) ); |
++ MP_CHECKOK( mp_init_size(&accum2, 3 * nLen + 2) ); |
++ |
++ MP_CHECKOK( mp_init_copy(&oddPowers[0], montBase) ); |
++ |
++ mp_init_size(&power2, nLen + 2 * MP_USED(montBase) + 2); |
++ MP_CHECKOK( mp_sqr(montBase, &power2) ); /* power2 = montBase ** 2 */ |
++ MP_CHECKOK( s_mp_redc(&power2, mmm) ); |
++ |
++ for (i = 1; i < odd_ints; ++i) { |
++ mp_init_size(oddPowers + i, nLen + 2 * MP_USED(&power2) + 2); |
++ MP_CHECKOK( mp_mul(oddPowers + (i - 1), &power2, oddPowers + i) ); |
++ MP_CHECKOK( s_mp_redc(oddPowers + i, mmm) ); |
++ } |
++ |
++ /* set accumulator to montgomery residue of 1 */ |
++ mp_set(&accum1, 1); |
++ MP_CHECKOK( s_mp_to_mont(&accum1, mmm, &accum1) ); |
++ pa1 = &accum1; |
++ pa2 = &accum2; |
++ |
++ for (expOff = bits_in_exponent - window_bits; expOff >= 0; expOff -= window_bits) { |
++ mp_size smallExp; |
++ MP_CHECKOK( mpl_get_bits(exponent, expOff, window_bits) ); |
++ smallExp = (mp_size)res; |
++ |
++ if (window_bits == 1) { |
++ if (!smallExp) { |
++ SQR(pa1,pa2); SWAPPA; |
++ } else if (smallExp & 1) { |
++ SQR(pa1,pa2); MUL(0,pa2,pa1); |
++ } else { |
++ ABORT; |
++ } |
++ } else if (window_bits == 4) { |
++ if (!smallExp) { |
++ SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); |
++ } else if (smallExp & 1) { |
++ SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); |
++ MUL(smallExp/2, pa1,pa2); SWAPPA; |
++ } else if (smallExp & 2) { |
++ SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); |
++ MUL(smallExp/4,pa2,pa1); SQR(pa1,pa2); SWAPPA; |
++ } else if (smallExp & 4) { |
++ SQR(pa1,pa2); SQR(pa2,pa1); MUL(smallExp/8,pa1,pa2); |
++ SQR(pa2,pa1); SQR(pa1,pa2); SWAPPA; |
++ } else if (smallExp & 8) { |
++ SQR(pa1,pa2); MUL(smallExp/16,pa2,pa1); SQR(pa1,pa2); |
++ SQR(pa2,pa1); SQR(pa1,pa2); SWAPPA; |
++ } else { |
++ ABORT; |
++ } |
++ } else if (window_bits == 5) { |
++ if (!smallExp) { |
++ SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); |
++ SQR(pa1,pa2); SWAPPA; |
++ } else if (smallExp & 1) { |
++ SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); |
++ SQR(pa1,pa2); MUL(smallExp/2,pa2,pa1); |
++ } else if (smallExp & 2) { |
++ SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); |
++ MUL(smallExp/4,pa1,pa2); SQR(pa2,pa1); |
++ } else if (smallExp & 4) { |
++ SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); |
++ MUL(smallExp/8,pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); |
++ } else if (smallExp & 8) { |
++ SQR(pa1,pa2); SQR(pa2,pa1); MUL(smallExp/16,pa1,pa2); |
++ SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); |
++ } else if (smallExp & 0x10) { |
++ SQR(pa1,pa2); MUL(smallExp/32,pa2,pa1); SQR(pa1,pa2); |
++ SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); |
++ } else { |
++ ABORT; |
++ } |
++ } else if (window_bits == 6) { |
++ if (!smallExp) { |
++ SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); |
++ SQR(pa1,pa2); SQR(pa2,pa1); |
++ } else if (smallExp & 1) { |
++ SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); |
++ SQR(pa1,pa2); SQR(pa2,pa1); MUL(smallExp/2,pa1,pa2); SWAPPA; |
++ } else if (smallExp & 2) { |
++ SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); |
++ SQR(pa1,pa2); MUL(smallExp/4,pa2,pa1); SQR(pa1,pa2); SWAPPA; |
++ } else if (smallExp & 4) { |
++ SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); |
++ MUL(smallExp/8,pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SWAPPA; |
++ } else if (smallExp & 8) { |
++ SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); |
++ MUL(smallExp/16,pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); |
++ SQR(pa1,pa2); SWAPPA; |
++ } else if (smallExp & 0x10) { |
++ SQR(pa1,pa2); SQR(pa2,pa1); MUL(smallExp/32,pa1,pa2); |
++ SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SWAPPA; |
++ } else if (smallExp & 0x20) { |
++ SQR(pa1,pa2); MUL(smallExp/64,pa2,pa1); SQR(pa1,pa2); |
++ SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SWAPPA; |
++ } else { |
++ ABORT; |
++ } |
++ } else { |
++ ABORT; |
++ } |
++ } |
++ |
++ res = s_mp_redc(pa1, mmm); |
++ mp_exch(pa1, result); |
++ |
++CLEANUP: |
++ mp_clear(&accum1); |
++ mp_clear(&accum2); |
++ mp_clear(&power2); |
++ for (i = 0; i < odd_ints; ++i) { |
++ mp_clear(oddPowers + i); |
++ } |
++ return res; |
++} |
++#undef SQR |
++#undef MUL |
++ |
++#ifdef MP_USING_CACHE_SAFE_MOD_EXP |
++unsigned int mp_using_cache_safe_exp = 1; |
++#endif |
++ |
++mp_err mp_set_safe_modexp(int value) |
++{ |
++#ifdef MP_USING_CACHE_SAFE_MOD_EXP |
++ mp_using_cache_safe_exp = value; |
++ return MP_OKAY; |
++#else |
++ if (value == 0) { |
++ return MP_OKAY; |
++ } |
++ return MP_BADARG; |
++#endif |
++} |
++ |
++#ifdef MP_USING_CACHE_SAFE_MOD_EXP |
++#define WEAVE_WORD_SIZE 4 |
++ |
++#ifndef MP_CHAR_STORE_SLOW |
++/* |
++ * mpi_to_weave takes an array of bignums, a matrix in which each bignum |
++ * occupies all the columns of a row, and transposes it into a matrix in |
++ * which each bignum occupies a column of every row. The first row of the |
++ * input matrix becomes the first column of the output matrix. The n'th |
++ * row of input becomes the n'th column of output. The input data is said |
++ * to be "interleaved" or "woven" into the output matrix. |
++ * |
++ * The array of bignums is left in this woven form. Each time a single |
++ * bignum value is needed, it is recreated by fetching the n'th column, |
++ * forming a single row which is the new bignum. |
++ * |
++ * The purpose of this interleaving is to make it impossible to determine which
++ * of the bignums is being used in any one operation by examining the pattern |
++ * of cache misses. |
++ * |
++ * The weaving function does not transpose the entire input matrix in one call. |
++ * It transposes 4 rows of mp_ints into their respective columns of output. |
++ * |
++ * There are two different implementations of the weaving and unweaving code |
++ * in this file. One uses byte loads and stores. The second uses loads and |
++ * stores of mp_weave_word size values. The weaved forms of these two |
++ * implementations differ. Consequently, each one has its own explanation. |
++ * |
++ * Here is the explanation for the byte-at-a-time implementation. |
++ * |
++ * This implementation treats each mp_int bignum as an array of bytes, |
++ * rather than as an array of mp_digits. It stores those bytes as a |
++ * column of bytes in the output matrix. It doesn't care if the machine |
++ * uses big-endian or little-endian byte ordering within mp_digits. |
++ * The first byte of the mp_digit array becomes the first byte in the output |
++ * column, regardless of whether that byte is the MSB or LSB of the mp_digit. |
++ * |
++ * "bignums" is an array of mp_ints. |
++ * It points to four rows, four mp_ints, a subset of a larger array of mp_ints. |
++ * |
++ * "weaved" is the weaved output matrix. |
++ * The first byte of bignums[0] is stored in weaved[0]. |
++ * |
++ * "nBignums" is the total number of bignums in the array of which "bignums" |
++ * is a part. |
++ * |
++ * "nDigits" is the size in mp_digits of each mp_int in the "bignums" array. |
++ * mp_ints that use less than nDigits digits are logically padded with zeros |
++ * while being stored in the weaved array. |
++ */ |
++mp_err mpi_to_weave(const mp_int *bignums, |
++ unsigned char *weaved, |
++ mp_size nDigits, /* in each mp_int of input */ |
++ mp_size nBignums) /* in the entire source array */ |
++{ |
++ mp_size i; |
++ unsigned char * endDest = weaved + (nDigits * nBignums * sizeof(mp_digit)); |
++ |
++ for (i=0; i < WEAVE_WORD_SIZE; i++) { |
++ mp_size used = MP_USED(&bignums[i]); |
++ unsigned char *pSrc = (unsigned char *)MP_DIGITS(&bignums[i]); |
++ unsigned char *endSrc = pSrc + (used * sizeof(mp_digit)); |
++ unsigned char *pDest = weaved + i; |
++ |
++ ARGCHK(MP_SIGN(&bignums[i]) == MP_ZPOS, MP_BADARG); |
++ ARGCHK(used <= nDigits, MP_BADARG); |
++ |
++ for (; pSrc < endSrc; pSrc++) { |
++ *pDest = *pSrc; |
++ pDest += nBignums; |
++ } |
++ while (pDest < endDest) { |
++ *pDest = 0; |
++ pDest += nBignums; |
++ } |
++ } |
++ |
++ return MP_OKAY; |
++} |
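++
++/*
++ * A stripped-down sketch of the same transpose with nBignums fixed at 4 and
++ * without the zero padding; the names weave_bytes/unweave_bytes are
++ * illustrative only.  Rebuilding any one value walks a whole column, so the
++ * set of cache lines touched is the same no matter which value is fetched.
++ */
++static void weave_bytes(const unsigned char *src[4], unsigned int len,
++                        unsigned char *weaved)      /* 4 * len bytes        */
++{
++  unsigned int i, j;
++  for (j = 0; j < 4; j++)                  /* row j of the input ...        */
++    for (i = 0; i < len; i++)              /* ... becomes column j of output */
++      weaved[i * 4 + j] = src[j][i];
++}
++
++static void unweave_bytes(const unsigned char *weaved, unsigned int col,
++                          unsigned int len, unsigned char *out)
++{
++  unsigned int i;
++  for (i = 0; i < len; i++)                /* walk down column 'col'        */
++    out[i] = weaved[i * 4 + col];
++}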
++ |
++/* Reverse the operation above for one mp_int. |
++ * Reconstruct one mp_int from its column in the weaved array. |
++ * "pSrc" points to the offset into the weave array of the bignum we |
++ * are going to reconstruct. |
++ */ |
++mp_err weave_to_mpi(mp_int *a, /* output, result */ |
++ const unsigned char *pSrc, /* input, byte matrix */ |
++ mp_size nDigits, /* per mp_int output */ |
++ mp_size nBignums) /* bignums in weaved matrix */ |
++{ |
++ unsigned char *pDest = (unsigned char *)MP_DIGITS(a); |
++ unsigned char *endDest = pDest + (nDigits * sizeof(mp_digit)); |
++ |
++ MP_SIGN(a) = MP_ZPOS; |
++ MP_USED(a) = nDigits; |
++ |
++ for (; pDest < endDest; pSrc += nBignums, pDest++) { |
++ *pDest = *pSrc; |
++ } |
++ s_mp_clamp(a); |
++ return MP_OKAY; |
++} |
++ |
++#else |
++ |
++/* Need a primitive that we know is 32 bits long... */ |
++/* This is true on all modern processors we know of today. */
++typedef unsigned int mp_weave_word; |
++ |
++/* |
++ * on some platforms character stores into memory is very expensive since they |
++ * generate a read/modify/write operation on the bus. On those platforms |
++ * we need to do integer writes to the bus. Because of some unrolled code, |
++ * in this current code the size of mp_weave_word must be four. The code that |
++ * makes this assumption explicity is called out. (on some platforms a write |
++ * of 4 bytes still requires a single read-modify-write operation. |
++ * |
++ * This function is takes the identical parameters as the function above, |
++ * however it lays out the final array differently. Where the previous function |
++ * treats the mpi_int as an byte array, this function treats it as an array of |
++ * mp_digits where each digit is stored in big endian order. |
++ * |
++ * since we need to interleave on a byte by byte basis, we need to collect |
++ * several mpi structures together into a single uint32 before we write. We |
++ * also need to make sure the uint32 is arranged so that the first value of |
++ * the first array winds up in b[0]. This means construction of that uint32 |
++ * is endian specific (even though the layout of the mp_digits in the array |
++ * is always big endian). |
++ * |
++ * The final data is stored as follows : |
++ * |
++ * Using the same logical array p as before, where m is sizeof(mp_digit),
++ * N is still count, and n is now b_size.  If we define p[i].digit[j]0 as the
++ * most significant byte of the word p[i].digit[j], p[i].digit[j]1 as |
++ * the next most significant byte of p[i].digit[j], ... and p[i].digit[j]m-1 |
++ * is the least significant byte. |
++ * Our array would look like: |
++ * p[0].digit[0]0 p[1].digit[0]0 ... p[N-2].digit[0]0 p[N-1].digit[0]0 |
++ * p[0].digit[0]1 p[1].digit[0]1 ... p[N-2].digit[0]1 p[N-1].digit[0]1 |
++ * . . |
++ * p[0].digit[0]m-1 p[1].digit[0]m-1 ... p[N-2].digit[0]m-1 p[N-1].digit[0]m-1 |
++ * p[0].digit[1]0 p[1].digit[1]0 ... p[N-2].digit[1]0 p[N-1].digit[1]0 |
++ * . . |
++ * . . |
++ * p[0].digit[n-1]m-2 p[1].digit[n-1]m-2 ... p[N-2].digit[n-1]m-2 p[N-1].digit[n-1]m-2 |
++ * p[0].digit[n-1]m-1 p[1].digit[n-1]m-1 ... p[N-2].digit[n-1]m-1 p[N-1].digit[n-1]m-1 |
++ * |
++ */ |
++mp_err mpi_to_weave(const mp_int *a, unsigned char *b, |
++ mp_size b_size, mp_size count) |
++{ |
++ mp_size i; |
++ mp_digit *digitsa0; |
++ mp_digit *digitsa1; |
++ mp_digit *digitsa2; |
++ mp_digit *digitsa3; |
++ mp_size useda0; |
++ mp_size useda1; |
++ mp_size useda2; |
++ mp_size useda3; |
++ mp_weave_word *weaved = (mp_weave_word *)b; |
++ |
++ count = count/sizeof(mp_weave_word); |
++ |
++ /* this code pretty much depends on this ! */ |
++#if MP_ARGCHK == 2 |
++ assert(WEAVE_WORD_SIZE == 4); |
++ assert(sizeof(mp_weave_word) == 4); |
++#endif |
++ |
++ digitsa0 = MP_DIGITS(&a[0]); |
++ digitsa1 = MP_DIGITS(&a[1]); |
++ digitsa2 = MP_DIGITS(&a[2]); |
++ digitsa3 = MP_DIGITS(&a[3]); |
++ useda0 = MP_USED(&a[0]); |
++ useda1 = MP_USED(&a[1]); |
++ useda2 = MP_USED(&a[2]); |
++ useda3 = MP_USED(&a[3]); |
++ |
++ ARGCHK(MP_SIGN(&a[0]) == MP_ZPOS, MP_BADARG); |
++ ARGCHK(MP_SIGN(&a[1]) == MP_ZPOS, MP_BADARG); |
++ ARGCHK(MP_SIGN(&a[2]) == MP_ZPOS, MP_BADARG); |
++ ARGCHK(MP_SIGN(&a[3]) == MP_ZPOS, MP_BADARG); |
++ ARGCHK(useda0 <= b_size, MP_BADARG); |
++ ARGCHK(useda1 <= b_size, MP_BADARG); |
++ ARGCHK(useda2 <= b_size, MP_BADARG); |
++ ARGCHK(useda3 <= b_size, MP_BADARG); |
++ |
++#define SAFE_FETCH(digit, used, word) ((word) < (used) ? (digit[word]) : 0) |
++ |
++ for (i=0; i < b_size; i++) { |
++ mp_digit d0 = SAFE_FETCH(digitsa0,useda0,i); |
++ mp_digit d1 = SAFE_FETCH(digitsa1,useda1,i); |
++ mp_digit d2 = SAFE_FETCH(digitsa2,useda2,i); |
++ mp_digit d3 = SAFE_FETCH(digitsa3,useda3,i); |
++ register mp_weave_word acc; |
++ |
++/* |
++ * ONE_STEP takes the MSB of each of our current digits and places that |
++ * byte in the appropriate position for writing to the weaved array. |
++ * On little endian: |
++ * b3 b2 b1 b0 |
++ * On big endian: |
++ * b0 b1 b2 b3 |
++ * When the data is written it would always wind up: |
++ * b[0] = b0 |
++ * b[1] = b1 |
++ * b[2] = b2 |
++ * b[3] = b3 |
++ * |
++ * Once we've written the MSB, we shift the whole digit left one
++ * byte, putting the next most significant byte in the MSB position,
++ * so that when we repeat the next step, that byte will be written.
++ * NOTE: This code assumes sizeof(mp_weave_word) and WEAVE_WORD_SIZE
++ *   are both 4.
++ */ |
++#ifdef MP_IS_LITTLE_ENDIAN |
++#define MPI_WEAVE_ONE_STEP \ |
++ acc = (d0 >> (MP_DIGIT_BIT-8)) & 0x000000ff; d0 <<= 8; /*b0*/ \ |
++ acc |= (d1 >> (MP_DIGIT_BIT-16)) & 0x0000ff00; d1 <<= 8; /*b1*/ \ |
++ acc |= (d2 >> (MP_DIGIT_BIT-24)) & 0x00ff0000; d2 <<= 8; /*b2*/ \ |
++ acc |= (d3 >> (MP_DIGIT_BIT-32)) & 0xff000000; d3 <<= 8; /*b3*/ \ |
++ *weaved = acc; weaved += count; |
++#else |
++#define MPI_WEAVE_ONE_STEP \ |
++ acc = (d0 >> (MP_DIGIT_BIT-32)) & 0xff000000; d0 <<= 8; /*b0*/ \ |
++ acc |= (d1 >> (MP_DIGIT_BIT-24)) & 0x00ff0000; d1 <<= 8; /*b1*/ \ |
++ acc |= (d2 >> (MP_DIGIT_BIT-16)) & 0x0000ff00; d2 <<= 8; /*b2*/ \ |
++ acc |= (d3 >> (MP_DIGIT_BIT-8)) & 0x000000ff; d3 <<= 8; /*b3*/ \ |
++ *weaved = acc; weaved += count; |
++#endif |
++ switch (sizeof(mp_digit)) { |
++ case 32: |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ case 16: |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ case 8: |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ case 4: |
++ MPI_WEAVE_ONE_STEP |
++ MPI_WEAVE_ONE_STEP |
++ case 2: |
++ MPI_WEAVE_ONE_STEP |
++ case 1: |
++ MPI_WEAVE_ONE_STEP |
++ break; |
++ } |
++ } |
++ |
++ return MP_OKAY; |
++} |
++ |
++/* reverse the operation above for one entry. |
++ * b points to the offset into the weave array of the power we are |
++ * calculating */ |
++mp_err weave_to_mpi(mp_int *a, const unsigned char *b, |
++ mp_size b_size, mp_size count) |
++{ |
++ mp_digit *pb = MP_DIGITS(a); |
++ mp_digit *end = &pb[b_size]; |
++ |
++ MP_SIGN(a) = MP_ZPOS; |
++ MP_USED(a) = b_size; |
++ |
++ for (; pb < end; pb++) { |
++ register mp_digit digit; |
++ |
++ digit = *b << 8; b += count; |
++#define MPI_UNWEAVE_ONE_STEP digit |= *b; b += count; digit = digit << 8; |
++ switch (sizeof(mp_digit)) { |
++ case 32: |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ case 16: |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ case 8: |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ case 4: |
++ MPI_UNWEAVE_ONE_STEP |
++ MPI_UNWEAVE_ONE_STEP |
++ case 2: |
++ break; |
++ } |
++ digit |= *b; b += count; |
++ |
++ *pb = digit; |
++ } |
++ s_mp_clamp(a); |
++ return MP_OKAY; |
++} |
++#endif |
++ |
++ |
++#define SQR(a,b) \ |
++ MP_CHECKOK( mp_sqr(a, b) );\ |
++ MP_CHECKOK( s_mp_redc(b, mmm) ) |
++ |
++#if defined(MP_MONT_USE_MP_MUL) |
++#define MUL_NOWEAVE(x,a,b) \ |
++ MP_CHECKOK( mp_mul(a, x, b) ); \ |
++ MP_CHECKOK( s_mp_redc(b, mmm) ) |
++#else |
++#define MUL_NOWEAVE(x,a,b) \ |
++ MP_CHECKOK( s_mp_mul_mont(a, x, b, mmm) ) |
++#endif |
++ |
++#define MUL(x,a,b) \ |
++ MP_CHECKOK( weave_to_mpi(&tmp, powers + (x), nLen, num_powers) ); \ |
++ MUL_NOWEAVE(&tmp,a,b) |
++ |
++#define SWAPPA ptmp = pa1; pa1 = pa2; pa2 = ptmp |
++#define MP_ALIGN(x,y) ((((ptrdiff_t)(x))+((y)-1))&(((ptrdiff_t)0)-(y))) |
++ |
++/* Do modular exponentiation using integer multiply code. */ |
++mp_err mp_exptmod_safe_i(const mp_int * montBase, |
++ const mp_int * exponent, |
++ const mp_int * modulus, |
++ mp_int * result, |
++ mp_mont_modulus *mmm, |
++ int nLen, |
++ mp_size bits_in_exponent, |
++ mp_size window_bits, |
++ mp_size num_powers) |
++{ |
++ mp_int *pa1, *pa2, *ptmp; |
++ mp_size i; |
++ mp_size first_window; |
++ mp_err res; |
++ int expOff; |
++ mp_int accum1, accum2, accum[WEAVE_WORD_SIZE]; |
++ mp_int tmp; |
++ unsigned char *powersArray; |
++ unsigned char *powers; |
++ |
++ MP_DIGITS(&accum1) = 0; |
++ MP_DIGITS(&accum2) = 0; |
++ MP_DIGITS(&accum[0]) = 0; |
++ MP_DIGITS(&accum[1]) = 0; |
++ MP_DIGITS(&accum[2]) = 0; |
++ MP_DIGITS(&accum[3]) = 0; |
++ MP_DIGITS(&tmp) = 0; |
++ |
++ powersArray = (unsigned char *)malloc(num_powers*(nLen*sizeof(mp_digit)+1)); |
++ if (powersArray == NULL) { |
++ res = MP_MEM; |
++ goto CLEANUP; |
++ } |
++ |
++ /* powers[i] = base ** (i); */ |
++ powers = (unsigned char *)MP_ALIGN(powersArray,num_powers); |
++ |
++ /* grab the first window value. This allows us to preload accumulator1 |
++   * and save a conversion, some squares, and a multiply. */
++ MP_CHECKOK( mpl_get_bits(exponent, |
++ bits_in_exponent-window_bits, window_bits) ); |
++ first_window = (mp_size)res; |
++ |
++ MP_CHECKOK( mp_init_size(&accum1, 3 * nLen + 2) ); |
++ MP_CHECKOK( mp_init_size(&accum2, 3 * nLen + 2) ); |
++ MP_CHECKOK( mp_init_size(&tmp, 3 * nLen + 2) ); |
++ |
++ /* build the first WEAVE_WORD powers inline */ |
++ /* if WEAVE_WORD_SIZE is not 4, this code will have to change */ |
++ if (num_powers > 2) { |
++ MP_CHECKOK( mp_init_size(&accum[0], 3 * nLen + 2) ); |
++ MP_CHECKOK( mp_init_size(&accum[1], 3 * nLen + 2) ); |
++ MP_CHECKOK( mp_init_size(&accum[2], 3 * nLen + 2) ); |
++ MP_CHECKOK( mp_init_size(&accum[3], 3 * nLen + 2) ); |
++ mp_set(&accum[0], 1); |
++ MP_CHECKOK( s_mp_to_mont(&accum[0], mmm, &accum[0]) ); |
++ MP_CHECKOK( mp_copy(montBase, &accum[1]) ); |
++ SQR(montBase, &accum[2]); |
++ MUL_NOWEAVE(montBase, &accum[2], &accum[3]); |
++ MP_CHECKOK( mpi_to_weave(accum, powers, nLen, num_powers) ); |
++ if (first_window < 4) { |
++ MP_CHECKOK( mp_copy(&accum[first_window], &accum1) ); |
++ first_window = num_powers; |
++ } |
++ } else { |
++ if (first_window == 0) { |
++ mp_set(&accum1, 1); |
++ MP_CHECKOK( s_mp_to_mont(&accum1, mmm, &accum1) ); |
++ } else { |
++ /* assert first_window == 1? */ |
++ MP_CHECKOK( mp_copy(montBase, &accum1) ); |
++ } |
++ } |
++ |
++ /* |
++   * Calculate all the powers in the powers array.
++   * This adds 2**(k-1)-2 square operations over just calculating the
++   * odd powers, where k is the window size, in the two other mp_exptmod
++ * implementations in this file. We will get some of that |
++ * back by not needing the first 'k' squares and one multiply for the |
++ * first window */ |
++ for (i = WEAVE_WORD_SIZE; i < num_powers; i++) { |
++ int acc_index = i & (WEAVE_WORD_SIZE-1); /* i % WEAVE_WORD_SIZE */ |
++ if ( i & 1 ) { |
++ MUL_NOWEAVE(montBase, &accum[acc_index-1] , &accum[acc_index]); |
++      /* we've filled the array; do our 'per array' processing */
++ if (acc_index == (WEAVE_WORD_SIZE-1)) { |
++ MP_CHECKOK( mpi_to_weave(accum, powers + i - (WEAVE_WORD_SIZE-1), |
++ nLen, num_powers) ); |
++ |
++ if (first_window <= i) { |
++ MP_CHECKOK( mp_copy(&accum[first_window & (WEAVE_WORD_SIZE-1)], |
++ &accum1) ); |
++ first_window = num_powers; |
++ } |
++ } |
++ } else { |
++      /* up to 8 we can find 2^i-1 in the accum array, but at 8 our source
++       * and target are the same, so we need to copy.  After that, the
++ * value is overwritten, so we need to fetch it from the stored |
++ * weave array */ |
++ if (i > 2* WEAVE_WORD_SIZE) { |
++ MP_CHECKOK(weave_to_mpi(&accum2, powers+i/2, nLen, num_powers)); |
++ SQR(&accum2, &accum[acc_index]); |
++ } else { |
++ int half_power_index = (i/2) & (WEAVE_WORD_SIZE-1); |
++ if (half_power_index == acc_index) { |
++ /* copy is cheaper than weave_to_mpi */ |
++ MP_CHECKOK(mp_copy(&accum[half_power_index], &accum2)); |
++ SQR(&accum2,&accum[acc_index]); |
++ } else { |
++ SQR(&accum[half_power_index],&accum[acc_index]); |
++ } |
++ } |
++ } |
++ } |
++  /* If accum1 isn't set, then there is something wrong with our logic
++   * above; it is an internal programming error.
++ */ |
++#if MP_ARGCHK == 2 |
++ assert(MP_USED(&accum1) != 0); |
++#endif |
++ |
++ /* set accumulator to montgomery residue of 1 */ |
++ pa1 = &accum1; |
++ pa2 = &accum2; |
++ |
++ for (expOff = bits_in_exponent - window_bits*2; expOff >= 0; expOff -= window_bits) { |
++ mp_size smallExp; |
++ MP_CHECKOK( mpl_get_bits(exponent, expOff, window_bits) ); |
++ smallExp = (mp_size)res; |
++ |
++    /* hand-unroll the loops */
++ switch (window_bits) { |
++ case 1: |
++ if (!smallExp) { |
++ SQR(pa1,pa2); SWAPPA; |
++ } else if (smallExp & 1) { |
++ SQR(pa1,pa2); MUL_NOWEAVE(montBase,pa2,pa1); |
++ } else { |
++ ABORT; |
++ } |
++ break; |
++ case 6: |
++ SQR(pa1,pa2); SQR(pa2,pa1); |
++ /* fall through */ |
++ case 4: |
++ SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); |
++ MUL(smallExp, pa1,pa2); SWAPPA; |
++ break; |
++ case 5: |
++ SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); |
++ SQR(pa1,pa2); MUL(smallExp,pa2,pa1); |
++ break; |
++ default: |
++ ABORT; /* could do a loop? */ |
++ } |
++ } |
++ |
++ res = s_mp_redc(pa1, mmm); |
++ mp_exch(pa1, result); |
++ |
++CLEANUP: |
++ mp_clear(&accum1); |
++ mp_clear(&accum2); |
++ mp_clear(&accum[0]); |
++ mp_clear(&accum[1]); |
++ mp_clear(&accum[2]); |
++ mp_clear(&accum[3]); |
++ mp_clear(&tmp); |
++ /* PORT_Memset(powers,0,num_powers*nLen*sizeof(mp_digit)); */ |
++ free(powersArray); |
++ return res; |
++} |
++#undef SQR |
++#undef MUL |
++#endif |
++ |
++mp_err mp_exptmod(const mp_int *inBase, const mp_int *exponent, |
++ const mp_int *modulus, mp_int *result) |
++{ |
++ const mp_int *base; |
++ mp_size bits_in_exponent, i, window_bits, odd_ints; |
++ mp_err res; |
++ int nLen; |
++ mp_int montBase, goodBase; |
++ mp_mont_modulus mmm; |
++#ifdef MP_USING_CACHE_SAFE_MOD_EXP |
++ static unsigned int max_window_bits; |
++#endif |
++ |
++ /* function for computing n0prime only works if n0 is odd */ |
++ if (!mp_isodd(modulus)) |
++ return s_mp_exptmod(inBase, exponent, modulus, result); |
++ |
++ MP_DIGITS(&montBase) = 0; |
++ MP_DIGITS(&goodBase) = 0; |
++ |
++ if (mp_cmp(inBase, modulus) < 0) { |
++ base = inBase; |
++ } else { |
++ MP_CHECKOK( mp_init(&goodBase) ); |
++ base = &goodBase; |
++ MP_CHECKOK( mp_mod(inBase, modulus, &goodBase) ); |
++ } |
++ |
++ nLen = MP_USED(modulus); |
++ MP_CHECKOK( mp_init_size(&montBase, 2 * nLen + 2) ); |
++ |
++ mmm.N = *modulus; /* a copy of the mp_int struct */ |
++ i = mpl_significant_bits(modulus); |
++ i += MP_DIGIT_BIT - 1; |
++ mmm.b = i - i % MP_DIGIT_BIT; |
++ |
++ /* compute n0', given n0, n0' = -(n0 ** -1) mod MP_RADIX |
++ ** where n0 = least significant mp_digit of N, the modulus. |
++ */ |
++ mmm.n0prime = 0 - s_mp_invmod_radix( MP_DIGIT(modulus, 0) ); |
++ |
++ MP_CHECKOK( s_mp_to_mont(base, &mmm, &montBase) ); |
++ |
++ bits_in_exponent = mpl_significant_bits(exponent); |
++#ifdef MP_USING_CACHE_SAFE_MOD_EXP |
++ if (mp_using_cache_safe_exp) { |
++ if (bits_in_exponent > 780) |
++ window_bits = 6; |
++ else if (bits_in_exponent > 256) |
++ window_bits = 5; |
++ else if (bits_in_exponent > 20) |
++ window_bits = 4; |
++ /* RSA public key exponents are typically under 20 bits (common values |
++ * are: 3, 17, 65537) and a 4-bit window is inefficient |
++ */ |
++ else |
++ window_bits = 1; |
++ } else |
++#endif |
++ if (bits_in_exponent > 480) |
++ window_bits = 6; |
++ else if (bits_in_exponent > 160) |
++ window_bits = 5; |
++ else if (bits_in_exponent > 20) |
++ window_bits = 4; |
++ /* RSA public key exponents are typically under 20 bits (common values |
++ * are: 3, 17, 65537) and a 4-bit window is inefficient |
++ */ |
++ else |
++ window_bits = 1; |
++ |
++#ifdef MP_USING_CACHE_SAFE_MOD_EXP |
++ /* |
++ * clamp the window size based on |
++ * the cache line size. |
++ */ |
++ if (!max_window_bits) { |
++ unsigned long cache_size = s_mpi_getProcessorLineSize(); |
++ /* processor has no cache, use 'fast' code always */ |
++ if (cache_size == 0) { |
++ mp_using_cache_safe_exp = 0; |
++ } |
++ if ((cache_size == 0) || (cache_size >= 64)) { |
++ max_window_bits = 6; |
++ } else if (cache_size >= 32) { |
++ max_window_bits = 5; |
++ } else if (cache_size >= 16) { |
++ max_window_bits = 4; |
++ } else max_window_bits = 1; /* should this be an assert? */ |
++ } |
++ |
++  /* clamp the window size down before we calculate bits_in_exponent */
++ if (mp_using_cache_safe_exp) { |
++ if (window_bits > max_window_bits) { |
++ window_bits = max_window_bits; |
++ } |
++ } |
++#endif |
++ |
++ odd_ints = 1 << (window_bits - 1); |
++ i = bits_in_exponent % window_bits; |
++ if (i != 0) { |
++ bits_in_exponent += window_bits - i; |
++ } |
++ |
++#ifdef MP_USING_MONT_MULF |
++ if (mp_using_mont_mulf) { |
++ MP_CHECKOK( s_mp_pad(&montBase, nLen) ); |
++ res = mp_exptmod_f(&montBase, exponent, modulus, result, &mmm, nLen, |
++ bits_in_exponent, window_bits, odd_ints); |
++ } else |
++#endif |
++#ifdef MP_USING_CACHE_SAFE_MOD_EXP |
++ if (mp_using_cache_safe_exp) { |
++ res = mp_exptmod_safe_i(&montBase, exponent, modulus, result, &mmm, nLen, |
++ bits_in_exponent, window_bits, 1 << window_bits); |
++ } else |
++#endif |
++ res = mp_exptmod_i(&montBase, exponent, modulus, result, &mmm, nLen, |
++ bits_in_exponent, window_bits, odd_ints); |
++ |
++CLEANUP: |
++ mp_clear(&montBase); |
++ mp_clear(&goodBase); |
++ /* Don't mp_clear mmm.N because it is merely a copy of modulus. |
++ ** Just zap it. |
++ */ |
++ memset(&mmm, 0, sizeof mmm); |
++ return res; |
++} |
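++
++/* Usage sketch (illustrative only; the values are made up for the example,
++ * error checking is omitted, and nothing here is a constant used elsewhere
++ * in this library).  An odd modulus takes the Montgomery-based path above,
++ * an even modulus falls back to s_mp_exptmod():
++ *
++ *   mp_int base, exp, mod, out;
++ *   mp_init(&base); mp_init(&exp); mp_init(&mod); mp_init(&out);
++ *   mp_set(&base, 7);
++ *   mp_set(&exp, 23);
++ *   mp_set(&mod, 143);                      // odd modulus
++ *   mp_exptmod(&base, &exp, &mod, &out);    // out = 7^23 mod 143 = 2
++ *   mp_clear(&base); mp_clear(&exp); mp_clear(&mod); mp_clear(&out);
++ */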
+diff --git a/net/third_party/nss/ssl/mpi/mpprime.c b/net/third_party/nss/ssl/mpi/mpprime.c |
+new file mode 100644 |
+index 0000000..ae8e496 |
+--- /dev/null |
++++ b/net/third_party/nss/ssl/mpi/mpprime.c |
+@@ -0,0 +1,617 @@ |
++/* |
++ * mpprime.c |
++ * |
++ * Utilities for finding and working with prime and pseudo-prime |
++ * integers |
++ * |
++ * ***** BEGIN LICENSE BLOCK ***** |
++ * Version: MPL 1.1/GPL 2.0/LGPL 2.1 |
++ * |
++ * The contents of this file are subject to the Mozilla Public License Version |
++ * 1.1 (the "License"); you may not use this file except in compliance with |
++ * the License. You may obtain a copy of the License at |
++ * http://www.mozilla.org/MPL/ |
++ * |
++ * Software distributed under the License is distributed on an "AS IS" basis, |
++ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License |
++ * for the specific language governing rights and limitations under the |
++ * License. |
++ * |
++ * The Original Code is the MPI Arbitrary Precision Integer Arithmetic library. |
++ * |
++ * The Initial Developer of the Original Code is |
++ * Michael J. Fromberger. |
++ * Portions created by the Initial Developer are Copyright (C) 1997 |
++ * the Initial Developer. All Rights Reserved. |
++ * |
++ * Contributor(s): |
++ * Netscape Communications Corporation |
++ * |
++ * Alternatively, the contents of this file may be used under the terms of |
++ * either the GNU General Public License Version 2 or later (the "GPL"), or |
++ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), |
++ * in which case the provisions of the GPL or the LGPL are applicable instead |
++ * of those above. If you wish to allow use of your version of this file only |
++ * under the terms of either the GPL or the LGPL, and not to allow others to |
++ * use your version of this file under the terms of the MPL, indicate your |
++ * decision by deleting the provisions above and replace them with the notice |
++ * and other provisions required by the GPL or the LGPL. If you do not delete |
++ * the provisions above, a recipient may use your version of this file under |
++ * the terms of any one of the MPL, the GPL or the LGPL. |
++ * |
++ * ***** END LICENSE BLOCK ***** */ |
++ |
++#include "mpi-priv.h" |
++#include "mpprime.h" |
++#include "mplogic.h" |
++#include <stdlib.h> |
++#include <string.h> |
++ |
++#define SMALL_TABLE 0 /* determines size of hard-wired prime table */ |
++ |
++#define RANDOM() rand() |
++ |
++#include "primes.c" /* pull in the prime digit table */ |
++ |
++/* |
++ Test if any of a given vector of digits divides a. If not, MP_NO |
++ is returned; otherwise, MP_YES is returned and 'which' is set to |
++ the index of the integer in the vector which divided a. |
++ */ |
++mp_err s_mpp_divp(mp_int *a, const mp_digit *vec, int size, int *which); |
++ |
++/* {{{ mpp_divis(a, b) */ |
++ |
++/* |
++ mpp_divis(a, b) |
++ |
++ Returns MP_YES if a is divisible by b, or MP_NO if it is not. |
++ */ |
++ |
++mp_err mpp_divis(mp_int *a, mp_int *b) |
++{ |
++ mp_err res; |
++ mp_int rem; |
++ |
++ if((res = mp_init(&rem)) != MP_OKAY) |
++ return res; |
++ |
++ if((res = mp_mod(a, b, &rem)) != MP_OKAY) |
++ goto CLEANUP; |
++ |
++ if(mp_cmp_z(&rem) == 0) |
++ res = MP_YES; |
++ else |
++ res = MP_NO; |
++ |
++CLEANUP: |
++ mp_clear(&rem); |
++ return res; |
++ |
++} /* end mpp_divis() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mpp_divis_d(a, d) */ |
++ |
++/* |
++ mpp_divis_d(a, d) |
++ |
++ Return MP_YES if a is divisible by d, or MP_NO if it is not. |
++ */ |
++ |
++mp_err mpp_divis_d(mp_int *a, mp_digit d) |
++{ |
++ mp_err res; |
++ mp_digit rem; |
++ |
++ ARGCHK(a != NULL, MP_BADARG); |
++ |
++ if(d == 0) |
++ return MP_NO; |
++ |
++ if((res = mp_mod_d(a, d, &rem)) != MP_OKAY) |
++ return res; |
++ |
++ if(rem == 0) |
++ return MP_YES; |
++ else |
++ return MP_NO; |
++ |
++} /* end mpp_divis_d() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mpp_random(a) */ |
++ |
++/* |
++ mpp_random(a) |
++ |
++ Assigns a random value to a. This value is generated using the |
++ standard C library's rand() function, so it should not be used for |
++ cryptographic purposes, but it should be fine for primality testing, |
++ since all we really care about there is good statistical properties. |
++ |
++ As many digits as a currently has are filled with random digits. |
++ */ |
++ |
++mp_err mpp_random(mp_int *a) |
++ |
++{ |
++ mp_digit next = 0; |
++ unsigned int ix, jx; |
++ |
++ ARGCHK(a != NULL, MP_BADARG); |
++ |
++ for(ix = 0; ix < USED(a); ix++) { |
++ for(jx = 0; jx < sizeof(mp_digit); jx++) { |
++ next = (next << CHAR_BIT) | (RANDOM() & UCHAR_MAX); |
++ } |
++ DIGIT(a, ix) = next; |
++ } |
++ |
++ return MP_OKAY; |
++ |
++} /* end mpp_random() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mpp_random_size(a, prec) */ |
++ |
++mp_err mpp_random_size(mp_int *a, mp_size prec) |
++{ |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL && prec > 0, MP_BADARG); |
++ |
++ if((res = s_mp_pad(a, prec)) != MP_OKAY) |
++ return res; |
++ |
++ return mpp_random(a); |
++ |
++} /* end mpp_random_size() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mpp_divis_vector(a, vec, size, which) */ |
++ |
++/* |
++ mpp_divis_vector(a, vec, size, which) |
++ |
++ Determines if a is divisible by any of the 'size' digits in vec. |
++ Returns MP_YES and sets 'which' to the index of the offending digit, |
++ if it is; returns MP_NO if it is not. |
++ */ |
++ |
++mp_err mpp_divis_vector(mp_int *a, const mp_digit *vec, int size, int *which) |
++{ |
++ ARGCHK(a != NULL && vec != NULL && size > 0, MP_BADARG); |
++ |
++ return s_mpp_divp(a, vec, size, which); |
++ |
++} /* end mpp_divis_vector() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mpp_divis_primes(a, np) */ |
++ |
++/* |
++ mpp_divis_primes(a, np) |
++ |
++ Test whether a is divisible by any of the first 'np' primes. If it |
++ is, returns MP_YES and sets *np to the value of the digit that did |
++ it. If not, returns MP_NO. |
++ */ |
++mp_err mpp_divis_primes(mp_int *a, mp_digit *np) |
++{ |
++ int size, which; |
++ mp_err res; |
++ |
++ ARGCHK(a != NULL && np != NULL, MP_BADARG); |
++ |
++ size = (int)*np; |
++ if(size > prime_tab_size) |
++ size = prime_tab_size; |
++ |
++ res = mpp_divis_vector(a, prime_tab, size, &which); |
++ if(res == MP_YES) |
++ *np = prime_tab[which]; |
++ |
++ return res; |
++ |
++} /* end mpp_divis_primes() */ |
++ |
++/* }}} */ |
++ |
++/* {{{ mpp_fermat(a, w) */ |
++ |
++/* |
++ Using w as a witness, try pseudo-primality testing based on Fermat's |
++ little theorem. If a is prime, and (w, a) = 1, then w^a == w (mod |
++ a). So, we compute z = w^a (mod a) and compare z to w; if they are |
++ equal, the test passes and we return MP_YES. Otherwise, we return |
++ MP_NO. |
++ */ |
++mp_err mpp_fermat(mp_int *a, mp_digit w) |
++{ |
++ mp_int base, test; |
++ mp_err res; |
++ |
++ if((res = mp_init(&base)) != MP_OKAY) |
++ return res; |
++ |
++ mp_set(&base, w); |
++ |
++ if((res = mp_init(&test)) != MP_OKAY) |
++ goto TEST; |
++ |
++ /* Compute test = base^a (mod a) */ |
++ if((res = mp_exptmod(&base, a, a, &test)) != MP_OKAY) |
++ goto CLEANUP; |
++ |
++ |
++ if(mp_cmp(&base, &test) == 0) |
++ res = MP_YES; |
++ else |
++ res = MP_NO; |
++ |
++ CLEANUP: |
++ mp_clear(&test); |
++ TEST: |
++ mp_clear(&base); |
++ |
++ return res; |
++ |
++} /* end mpp_fermat() */ |
++ |
++/* }}} */ |
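++
++/* Note (illustrative): the Fermat check above is only a quick filter, since
++ * composites can pass it for a particular witness -- e.g. 341 = 11*31
++ * satisfies 2^341 == 2 (mod 341).  That is why mpp_make_prime() below
++ * follows a passing Fermat test with the Miller-Rabin rounds in
++ * mpp_pprime().  A minimal sketch of the call, error checking omitted:
++ *
++ *   mp_int a;
++ *   mp_init(&a);
++ *   mp_set(&a, 341);
++ *   if (mpp_fermat(&a, 2) == MP_YES) {
++ *       // reached even though 341 is composite
++ *   }
++ *   mp_clear(&a);
++ */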
++ |
++/* |
++ Perform the fermat test on each of the primes in a list until |
++ a) one of them shows a is not prime, or |
++ b) the list is exhausted. |
++ Returns: MP_YES if it passes tests. |
++  MP_NO if the Fermat test reveals it is composite
++ Some MP error code if some other error occurs. |
++ */ |
++mp_err mpp_fermat_list(mp_int *a, const mp_digit *primes, mp_size nPrimes) |
++{ |
++ mp_err rv = MP_YES; |
++ |
++ while (nPrimes-- > 0 && rv == MP_YES) { |
++ rv = mpp_fermat(a, *primes++); |
++ } |
++ return rv; |
++} |
++ |
++/* {{{ mpp_pprime(a, nt) */ |
++ |
++/* |
++ mpp_pprime(a, nt) |
++ |
++  Performs nt iterations of the Miller-Rabin probabilistic primality
++ test on a. Returns MP_YES if the tests pass, MP_NO if one fails. |
++ If MP_NO is returned, the number is definitely composite. If MP_YES |
++ is returned, it is probably prime (but that is not guaranteed). |
++ */ |
++ |
++mp_err mpp_pprime(mp_int *a, int nt) |
++{ |
++ mp_err res; |
++ mp_int x, amo, m, z; /* "amo" = "a minus one" */ |
++ int iter; |
++ unsigned int jx; |
++ mp_size b; |
++ |
++ ARGCHK(a != NULL, MP_BADARG); |
++ |
++ MP_DIGITS(&x) = 0; |
++ MP_DIGITS(&amo) = 0; |
++ MP_DIGITS(&m) = 0; |
++ MP_DIGITS(&z) = 0; |
++ |
++ /* Initialize temporaries... */ |
++ MP_CHECKOK( mp_init(&amo)); |
++ /* Compute amo = a - 1 for what follows... */ |
++ MP_CHECKOK( mp_sub_d(a, 1, &amo) ); |
++ |
++ b = mp_trailing_zeros(&amo); |
++ if (!b) { /* a was even ? */ |
++ res = MP_NO; |
++ goto CLEANUP; |
++ } |
++ |
++ MP_CHECKOK( mp_init_size(&x, MP_USED(a)) ); |
++ MP_CHECKOK( mp_init(&z) ); |
++ MP_CHECKOK( mp_init(&m) ); |
++ MP_CHECKOK( mp_div_2d(&amo, b, &m, 0) ); |
++ |
++ /* Do the test nt times... */ |
++ for(iter = 0; iter < nt; iter++) { |
++ |
++ /* Choose a random value for 1 < x < a */ |
++ s_mp_pad(&x, USED(a)); |
++ mpp_random(&x); |
++ MP_CHECKOK( mp_mod(&x, a, &x) ); |
++ if(mp_cmp_d(&x, 1) <= 0) { |
++ iter--; /* don't count this iteration */ |
++ continue; /* choose a new x */ |
++ } |
++ |
++ /* Compute z = (x ** m) mod a */ |
++ MP_CHECKOK( mp_exptmod(&x, &m, a, &z) ); |
++ |
++ if(mp_cmp_d(&z, 1) == 0 || mp_cmp(&z, &amo) == 0) { |
++ res = MP_YES; |
++ continue; |
++ } |
++ |
++ res = MP_NO; /* just in case the following for loop never executes. */ |
++ for (jx = 1; jx < b; jx++) { |
++ /* z = z^2 (mod a) */ |
++ MP_CHECKOK( mp_sqrmod(&z, a, &z) ); |
++ res = MP_NO; /* previous line set res to MP_YES */ |
++ |
++ if(mp_cmp_d(&z, 1) == 0) { |
++ break; |
++ } |
++ if(mp_cmp(&z, &amo) == 0) { |
++ res = MP_YES; |
++ break; |
++ } |
++ } /* end testing loop */ |
++ |
++ /* If the test passes, we will continue iterating, but a failed |
++ test means the candidate is definitely NOT prime, so we will |
++ immediately break out of this loop |
++ */ |
++ if(res == MP_NO) |
++ break; |
++ |
++ } /* end iterations loop */ |
++ |
++CLEANUP: |
++ mp_clear(&m); |
++ mp_clear(&z); |
++ mp_clear(&x); |
++ mp_clear(&amo); |
++ return res; |
++ |
++} /* end mpp_pprime() */ |
++ |
++/* }}} */ |
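++
++/* Worked example (illustrative, made-up number) of the decomposition used
++ * above: for a = 221 the code computes amo = 220, b = mp_trailing_zeros(220)
++ * = 2 and m = 220 >> 2 = 55.  Each round picks a random 1 < x < 221 and
++ * computes z = x^55 mod 221; the round passes immediately if z is 1 or 220,
++ * otherwise z is squared at most b-1 = 1 more time hoping to reach 220
++ * (= a-1), and the candidate is rejected as composite if it never does.
++ */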
++ |
++/* Produce table of composites from list of primes and trial value. |
++** trial must be odd. List of primes must not include 2. |
++** sieve should have dimension >= MAXPRIME/2, where MAXPRIME is largest |
++** prime in list of primes. After this function is finished, |
++** if sieve[i] is non-zero, then (trial + 2*i) is composite. |
++** Each prime used in the sieve costs one division of trial, and eliminates |
++** one or more values from the search space. (3 eliminates 1/3 of the values |
++** alone!) Each value left in the search space costs 1 or more modular |
++** exponentiations. So, these divisions are a bargain!
++*/ |
++mp_err mpp_sieve(mp_int *trial, const mp_digit *primes, mp_size nPrimes, |
++ unsigned char *sieve, mp_size nSieve) |
++{ |
++ mp_err res; |
++ mp_digit rem; |
++ mp_size ix; |
++ unsigned long offset; |
++ |
++ memset(sieve, 0, nSieve); |
++ |
++ for(ix = 0; ix < nPrimes; ix++) { |
++ mp_digit prime = primes[ix]; |
++ mp_size i; |
++ if((res = mp_mod_d(trial, prime, &rem)) != MP_OKAY) |
++ return res; |
++ |
++    if (rem == 0) {
++      offset = 0;
++    } else if (rem & 1) {
++      /* trial is odd, so trial + 2i == 0 (mod prime) first at i = (prime - rem)/2 */
++      offset = (prime - rem) / 2;
++    } else {
++      offset = prime - (rem / 2);
++    }
++ for (i = offset; i < nSieve ; i += prime) { |
++ sieve[i] = 1; |
++ } |
++ } |
++ |
++ return MP_OKAY; |
++} |
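++
++/* Worked example (illustrative, made-up numbers): with trial = 11 and
++ * prime = 3, mp_mod_d() gives rem = 2, so offset = 3 - (2/2) = 2 and the
++ * loop marks sieve[2], sieve[5], sieve[8], ... -- i.e. the candidates
++ * 11 + 2*2 = 15, 11 + 2*5 = 21, 11 + 2*8 = 27, all divisible by 3.
++ */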
++ |
++#define SIEVE_SIZE 32*1024 |
++ |
++mp_err mpp_make_prime(mp_int *start, mp_size nBits, mp_size strong, |
++ unsigned long * nTries) |
++{ |
++ mp_digit np; |
++ mp_err res; |
++ int i = 0; |
++ mp_int trial; |
++ mp_int q; |
++ mp_size num_tests; |
++ unsigned char *sieve; |
++ |
++ ARGCHK(start != 0, MP_BADARG); |
++ ARGCHK(nBits > 16, MP_RANGE); |
++ |
++ sieve = malloc(SIEVE_SIZE); |
++ ARGCHK(sieve != NULL, MP_MEM); |
++ |
++ MP_DIGITS(&trial) = 0; |
++ MP_DIGITS(&q) = 0; |
++ MP_CHECKOK( mp_init(&trial) ); |
++ MP_CHECKOK( mp_init(&q) ); |
++  /* values taken from table 4.4, Handbook of Applied Cryptography */
++ if (nBits >= 1300) { |
++ num_tests = 2; |
++ } else if (nBits >= 850) { |
++ num_tests = 3; |
++ } else if (nBits >= 650) { |
++ num_tests = 4; |
++ } else if (nBits >= 550) { |
++ num_tests = 5; |
++ } else if (nBits >= 450) { |
++ num_tests = 6; |
++ } else if (nBits >= 400) { |
++ num_tests = 7; |
++ } else if (nBits >= 350) { |
++ num_tests = 8; |
++ } else if (nBits >= 300) { |
++ num_tests = 9; |
++ } else if (nBits >= 250) { |
++ num_tests = 12; |
++ } else if (nBits >= 200) { |
++ num_tests = 15; |
++ } else if (nBits >= 150) { |
++ num_tests = 18; |
++ } else if (nBits >= 100) { |
++ num_tests = 27; |
++ } else |
++ num_tests = 50; |
++ |
++ if (strong) |
++ --nBits; |
++ MP_CHECKOK( mpl_set_bit(start, nBits - 1, 1) ); |
++ MP_CHECKOK( mpl_set_bit(start, 0, 1) ); |
++ for (i = mpl_significant_bits(start) - 1; i >= nBits; --i) { |
++ MP_CHECKOK( mpl_set_bit(start, i, 0) ); |
++ } |
++  /* start sieving with the prime value 3. */
++ MP_CHECKOK(mpp_sieve(start, prime_tab + 1, prime_tab_size - 1, |
++ sieve, SIEVE_SIZE) ); |
++ |
++#ifdef DEBUG_SIEVE |
++ res = 0; |
++ for (i = 0; i < SIEVE_SIZE; ++i) { |
++ if (!sieve[i]) |
++ ++res; |
++ } |
++ fprintf(stderr,"sieve found %d potential primes.\n", res); |
++#define FPUTC(x,y) fputc(x,y) |
++#else |
++#define FPUTC(x,y) |
++#endif |
++ |
++ res = MP_NO; |
++ for(i = 0; i < SIEVE_SIZE; ++i) { |
++ if (sieve[i]) /* this number is composite */ |
++ continue; |
++ MP_CHECKOK( mp_add_d(start, 2 * i, &trial) ); |
++ FPUTC('.', stderr); |
++ /* run a Fermat test */ |
++ res = mpp_fermat(&trial, 2); |
++ if (res != MP_OKAY) { |
++ if (res == MP_NO) |
++ continue; /* was composite */ |
++ goto CLEANUP; |
++ } |
++ |
++ FPUTC('+', stderr); |
++ /* If that passed, run some Miller-Rabin tests */ |
++ res = mpp_pprime(&trial, num_tests); |
++ if (res != MP_OKAY) { |
++ if (res == MP_NO) |
++ continue; /* was composite */ |
++ goto CLEANUP; |
++ } |
++ FPUTC('!', stderr); |
++ |
++ if (!strong) |
++ break; /* success !! */ |
++ |
++ /* At this point, we have strong evidence that our candidate |
++ is itself prime. If we want a strong prime, we need now |
++ to test q = 2p + 1 for primality... |
++ */ |
++ MP_CHECKOK( mp_mul_2(&trial, &q) ); |
++ MP_CHECKOK( mp_add_d(&q, 1, &q) ); |
++ |
++ /* Test q for small prime divisors ... */ |
++ np = prime_tab_size; |
++ res = mpp_divis_primes(&q, &np); |
++ if (res == MP_YES) { /* is composite */ |
++ mp_clear(&q); |
++ continue; |
++ } |
++ if (res != MP_NO) |
++ goto CLEANUP; |
++ |
++ /* And test with Fermat, as with its parent ... */ |
++ res = mpp_fermat(&q, 2); |
++ if (res != MP_YES) { |
++ mp_clear(&q); |
++ if (res == MP_NO) |
++ continue; /* was composite */ |
++ goto CLEANUP; |
++ } |
++ |
++ /* And test with Miller-Rabin, as with its parent ... */ |
++ res = mpp_pprime(&q, num_tests); |
++ if (res != MP_YES) { |
++ mp_clear(&q); |
++ if (res == MP_NO) |
++ continue; /* was composite */ |
++ goto CLEANUP; |
++ } |
++ |
++ /* If it passed, we've got a winner */ |
++ mp_exch(&q, &trial); |
++ mp_clear(&q); |
++ break; |
++ |
++ } /* end of loop through sieved values */ |
++ if (res == MP_YES) |
++ mp_exch(&trial, start); |
++CLEANUP: |
++ mp_clear(&trial); |
++ mp_clear(&q); |
++ if (nTries) |
++ *nTries += i; |
++ if (sieve != NULL) { |
++ memset(sieve, 0, SIEVE_SIZE); |
++ free (sieve); |
++ } |
++ return res; |
++} |
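++
++/* Usage sketch (illustrative only; nDigits and the bit size are example
++ * values, error checking omitted).  A caller fills 'start' with random
++ * data and lets mpp_make_prime() search the sieve window:
++ *
++ *   mp_int start;
++ *   unsigned long tries = 0;
++ *   mp_init(&start);
++ *   mpp_random_size(&start, nDigits);     // enough digits for 1024 bits
++ *   if (mpp_make_prime(&start, 1024, 0, &tries) == MP_YES) {
++ *       // 'start' is now a probable prime with bit 1023 and bit 0 set
++ *   }
++ *   mp_clear(&start);
++ */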
++ |
++/*========================================================================*/ |
++/*------------------------------------------------------------------------*/ |
++/* Static functions visible only to the library internally */ |
++ |
++/* {{{ s_mpp_divp(a, vec, size, which) */ |
++ |
++/* |
++ Test for divisibility by members of a vector of digits. Returns |
++ MP_NO if a is not divisible by any of them; returns MP_YES and sets |
++ 'which' to the index of the offender, if it is. Will stop on the |
++ first digit against which a is divisible. |
++ */ |
++ |
++mp_err s_mpp_divp(mp_int *a, const mp_digit *vec, int size, int *which) |
++{ |
++ mp_err res; |
++ mp_digit rem; |
++ |
++ int ix; |
++ |
++ for(ix = 0; ix < size; ix++) { |
++ if((res = mp_mod_d(a, vec[ix], &rem)) != MP_OKAY) |
++ return res; |
++ |
++ if(rem == 0) { |
++ if(which) |
++ *which = ix; |
++ return MP_YES; |
++ } |
++ } |
++ |
++ return MP_NO; |
++ |
++} /* end s_mpp_divp() */ |
++ |
++/* }}} */ |
++ |
++/*------------------------------------------------------------------------*/ |
++/* HERE THERE BE DRAGONS */ |
+diff --git a/net/third_party/nss/ssl/mpi/mpprime.h b/net/third_party/nss/ssl/mpi/mpprime.h |
+new file mode 100644 |
+index 0000000..486d4a1 |
+--- /dev/null |
++++ b/net/third_party/nss/ssl/mpi/mpprime.h |
+@@ -0,0 +1,70 @@ |
++/* |
++ * mpprime.h |
++ * |
++ * Utilities for finding and working with prime and pseudo-prime |
++ * integers |
++ * |
++ * ***** BEGIN LICENSE BLOCK ***** |
++ * Version: MPL 1.1/GPL 2.0/LGPL 2.1 |
++ * |
++ * The contents of this file are subject to the Mozilla Public License Version |
++ * 1.1 (the "License"); you may not use this file except in compliance with |
++ * the License. You may obtain a copy of the License at |
++ * http://www.mozilla.org/MPL/ |
++ * |
++ * Software distributed under the License is distributed on an "AS IS" basis, |
++ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License |
++ * for the specific language governing rights and limitations under the |
++ * License. |
++ * |
++ * The Original Code is the MPI Arbitrary Precision Integer Arithmetic library. |
++ * |
++ * The Initial Developer of the Original Code is |
++ * Michael J. Fromberger. |
++ * Portions created by the Initial Developer are Copyright (C) 1997 |
++ * the Initial Developer. All Rights Reserved. |
++ * |
++ * Contributor(s): |
++ * |
++ * Alternatively, the contents of this file may be used under the terms of |
++ * either the GNU General Public License Version 2 or later (the "GPL"), or |
++ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), |
++ * in which case the provisions of the GPL or the LGPL are applicable instead |
++ * of those above. If you wish to allow use of your version of this file only |
++ * under the terms of either the GPL or the LGPL, and not to allow others to |
++ * use your version of this file under the terms of the MPL, indicate your |
++ * decision by deleting the provisions above and replace them with the notice |
++ * and other provisions required by the GPL or the LGPL. If you do not delete |
++ * the provisions above, a recipient may use your version of this file under |
++ * the terms of any one of the MPL, the GPL or the LGPL. |
++ * |
++ * ***** END LICENSE BLOCK ***** */ |
++ |
++#ifndef _H_MP_PRIME_ |
++#define _H_MP_PRIME_ |
++ |
++#include "mpi.h" |
++ |
++extern const int prime_tab_size; /* number of primes available */ |
++extern const mp_digit prime_tab[]; |
++ |
++/* Tests for divisibility */ |
++mp_err mpp_divis(mp_int *a, mp_int *b); |
++mp_err mpp_divis_d(mp_int *a, mp_digit d); |
++ |
++/* Random selection */ |
++mp_err mpp_random(mp_int *a); |
++mp_err mpp_random_size(mp_int *a, mp_size prec); |
++ |
++/* Pseudo-primality testing */ |
++mp_err mpp_divis_vector(mp_int *a, const mp_digit *vec, int size, int *which); |
++mp_err mpp_divis_primes(mp_int *a, mp_digit *np); |
++mp_err mpp_fermat(mp_int *a, mp_digit w); |
++mp_err mpp_fermat_list(mp_int *a, const mp_digit *primes, mp_size nPrimes); |
++mp_err mpp_pprime(mp_int *a, int nt); |
++mp_err mpp_sieve(mp_int *trial, const mp_digit *primes, mp_size nPrimes, |
++ unsigned char *sieve, mp_size nSieve); |
++mp_err mpp_make_prime(mp_int *start, mp_size nBits, mp_size strong, |
++ unsigned long * nTries); |
++ |
++#endif /* end _H_MP_PRIME_ */ |
+diff --git a/net/third_party/nss/ssl/mpi/secmpi.h b/net/third_party/nss/ssl/mpi/secmpi.h |
+new file mode 100644 |
+index 0000000..e343fb8 |
+--- /dev/null |
++++ b/net/third_party/nss/ssl/mpi/secmpi.h |
+@@ -0,0 +1,61 @@ |
++/* ***** BEGIN LICENSE BLOCK ***** |
++ * Version: MPL 1.1/GPL 2.0/LGPL 2.1 |
++ * |
++ * The contents of this file are subject to the Mozilla Public License Version |
++ * 1.1 (the "License"); you may not use this file except in compliance with |
++ * the License. You may obtain a copy of the License at |
++ * http://www.mozilla.org/MPL/ |
++ * |
++ * Software distributed under the License is distributed on an "AS IS" basis, |
++ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License |
++ * for the specific language governing rights and limitations under the |
++ * License. |
++ * |
++ * The Original Code is the Netscape security libraries. |
++ * |
++ * The Initial Developer of the Original Code is |
++ * Netscape Communications Corporation. |
++ * Portions created by the Initial Developer are Copyright (C) 1994-2000 |
++ * the Initial Developer. All Rights Reserved. |
++ * |
++ * Contributor(s): |
++ * |
++ * Alternatively, the contents of this file may be used under the terms of |
++ * either the GNU General Public License Version 2 or later (the "GPL"), or |
++ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), |
++ * in which case the provisions of the GPL or the LGPL are applicable instead |
++ * of those above. If you wish to allow use of your version of this file only |
++ * under the terms of either the GPL or the LGPL, and not to allow others to |
++ * use your version of this file under the terms of the MPL, indicate your |
++ * decision by deleting the provisions above and replace them with the notice |
++ * and other provisions required by the GPL or the LGPL. If you do not delete |
++ * the provisions above, a recipient may use your version of this file under |
++ * the terms of any one of the MPL, the GPL or the LGPL. |
++ * |
++ * ***** END LICENSE BLOCK ***** */ |
++ |
++#include "mpi.h" |
++ |
++#define CHECK_SEC_OK(func) if (SECSuccess != (rv = func)) goto cleanup |
++ |
++#define CHECK_MPI_OK(func) if (MP_OKAY > (err = func)) goto cleanup |
++ |
++#define OCTETS_TO_MPINT(oc, mp, len) \ |
++ CHECK_MPI_OK(mp_read_unsigned_octets((mp), oc, len)) |
++ |
++#define SECITEM_TO_MPINT(it, mp) \ |
++ CHECK_MPI_OK(mp_read_unsigned_octets((mp), (it).data, (it).len)) |
++ |
++#define MPINT_TO_SECITEM(mp, it, arena) \ |
++ SECITEM_AllocItem(arena, (it), mp_unsigned_octet_size(mp)); \ |
++ if ((it)->data == NULL) {err = MP_MEM; goto cleanup;} \ |
++ err = mp_to_unsigned_octets(mp, (it)->data, (it)->len); \ |
++ if (err < 0) goto cleanup; else err = MP_OKAY; |
++ |
++#define MP_TO_SEC_ERROR(err) \ |
++ switch (err) { \ |
++ case MP_MEM: PORT_SetError(SEC_ERROR_NO_MEMORY); break; \ |
++ case MP_RANGE: PORT_SetError(SEC_ERROR_BAD_DATA); break; \ |
++ case MP_BADARG: PORT_SetError(SEC_ERROR_INVALID_ARGS); break; \ |
++ default: PORT_SetError(SEC_ERROR_LIBRARY_FAILURE); break; \ |
++ } |
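++
++/* Usage sketch (illustrative): these macros assume the enclosing function
++ * declares 'mp_err err' (and 'SECStatus rv' for CHECK_SEC_OK) and provides
++ * a 'cleanup:' label, as the SRP code below does:
++ *
++ *   mp_err err = MP_OKAY;
++ *   mp_int n;
++ *   CHECK_MPI_OK( mp_init(&n) );
++ *   SECITEM_TO_MPINT(item, &n);        // 'item' is some SECItem (example)
++ *   ...
++ * cleanup:
++ *   mp_clear(&n);
++ *   if (err) { MP_TO_SEC_ERROR(err); }
++ */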
+diff --git a/net/third_party/nss/ssl/srp.c b/net/third_party/nss/ssl/srp.c |
+new file mode 100644 |
+index 0000000..a1cb96c |
+--- /dev/null |
++++ b/net/third_party/nss/ssl/srp.c |
+@@ -0,0 +1,550 @@ |
++/* ***** BEGIN LICENSE BLOCK ***** |
++ * Version: MPL 1.1/GPL 2.0/LGPL 2.1 |
++ * |
++ * The contents of this file are subject to the Mozilla Public License Version |
++ * 1.1 (the "License"); you may not use this file except in compliance with |
++ * the License. You may obtain a copy of the License at |
++ * http://www.mozilla.org/MPL/ |
++ * |
++ * Software distributed under the License is distributed on an "AS IS" basis, |
++ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License |
++ * for the specific language governing rights and limitations under the |
++ * License. |
++ * |
++ * The Initial Developer of the Original Code is |
++ * Steffen Schulz - pepe (at) cbg.dyndns.org |
++ * |
++ * Portions created by the Initial Developer are Copyright (C) 2007 |
++ * the Initial Developer. All Rights Reserved. |
++ * |
++ * Contributor(s): |
++ * |
++ * Alternatively, the contents of this file may be used under the terms of |
++ * either the GNU General Public License Version 2 or later (the "GPL"), or |
++ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), |
++ * in which case the provisions of the GPL or the LGPL are applicable instead |
++ * of those above. If you wish to allow use of your version of this file only |
++ * under the terms of either the GPL or the LGPL, and not to allow others to |
++ * use your version of this file under the terms of the MPL, indicate your |
++ * decision by deleting the provisions above and replace them with the notice |
++ * and other provisions required by the GPL or the LGPL. If you do not delete |
++ * the provisions above, a recipient may use your version of this file under |
++ * the terms of any one of the MPL, the GPL or the LGPL. |
++ * |
++ * ***** END LICENSE BLOCK ***** */ |
++ |
++/* |
++ * This file implements the core SRP algorithms described in RFC 5054
++ * for enabling secure password based authentication in TLS via SRP. |
++ * |
++ * See also: |
++ * Wu, T., "SRP-6: Improvements and Refinements to the Secure |
++ * Remote Password Protocol", October 2002, |
++ * <http://srp.stanford.edu/srp6.ps>. |
++ */ |
++ |
++#ifdef FREEBL_NO_DEPEND |
++#include "stubs.h" |
++#endif |
++ |
++#include "secerr.h" |
++#include "blapi.h" |
++#include "mpi/mpi.h" |
++#include "mpi/secmpi.h" |
++#include "secitem.h" |
++#include "keythi.h" |
++#include "plbase64.h" |
++ |
++#include "srp_groups.h" |
++ |
++/* length of SRP secret keys in bytes */
++#define SRP_SECRET_KEY_LEN 32 |
++ |
++ |
++/* check if (N,g) are among the known-good group params */ |
++static SECStatus check_srp_group(const mp_int *N, const mp_int *g) { |
++ int i; |
++ char *N_str; |
++ char *g_str; |
++ mp_err err; |
++ SECStatus rv = SECFailure; |
++ |
++ N_str = PORT_Alloc(mp_radix_size(N, 16)); |
++ g_str = PORT_Alloc(mp_radix_size(g, 16)); |
++ |
++ CHECK_MPI_OK(mp_toradix(N, N_str, 16)); |
++ CHECK_MPI_OK(mp_toradix(g, g_str, 16)); |
++ |
++    /* accept (N,g) only if both modulus and generator match a known group */
++    for (i = 0; i < SRP_KNOWN_GROUPS; i++)
++	if (PORT_Strcmp(N_str, known_srp_groups[i].modulus) == 0 &&
++	    PORT_Strcmp(g_str, known_srp_groups[i].generator) == 0) {
++	    rv = SECSuccess;
++	    break;
++	}
++ |
++ if (rv !=SECSuccess) |
++ PORT_SetError(SEC_ERROR_SRP_UNSUPPORTED_GROUP); |
++ |
++cleanup: |
++ PORT_Free(N_str); |
++ PORT_Free(g_str); |
++ if (err) { |
++ MP_TO_SEC_ERROR(err); |
++ rv = SECFailure; |
++ } |
++ |
++ return rv; |
++} |
++ |
++/* check if B%N = 0 -> trapdoor */ |
++static SECStatus srp_backdoor_check(const mp_int *N, const mp_int *B) { |
++ |
++ mp_int res; |
++ mp_err err; |
++ |
++ CHECK_MPI_OK(mp_init(&res)); |
++ CHECK_MPI_OK(mp_mod(B, N, &res)); |
++ |
++ |
++    if ( mp_cmp_z(&res) == 0) {
++	PORT_SetError(SEC_ERROR_SRP_ILLEGAL_PARAMETER);
++	mp_clear(&res);
++	return SECFailure;
++    }
++cleanup: |
++ mp_clear(&res); |
++ if (err) { |
++ MP_TO_SEC_ERROR(err); |
++ return SECFailure; |
++ } |
++ return SECSuccess; |
++} |
++ |
++/* SRP_ServerDerive computes the common key 'pms'
++ * |
++ * The pre-master secret is calculated as follows: |
++ * |
++ * u = SHA1(PAD(A) | PAD(B)) |
++ * k = SHA1(N | PAD(g)) |
++ * pms = (A * v^u) ^ b % N |
++ * |
++ * PAD() left-pads with \0 up to the length of N
++ */ |
++ |
++SECStatus SRP_ServerDerive(SRPPrivateKey *prvKey, SRPDeriveParams *srp, |
++ SECItem *pms) { |
++ mp_int mp_pms, mp_res; |
++ mp_int mp_A, mp_b, mp_v; |
++ mp_int mp_N, mp_g, mp_u, mp_k; |
++ SECItem *it_u, *it_k; |
++ unsigned char *zero; |
++ unsigned int len = srp->N.len; |
++ SHA1Context *ctx = SHA1_NewContext(); |
++ SECStatus rv = SECFailure; |
++ mp_err err = MP_OKAY; |
++ |
++ CHECK_MPI_OK(mp_init(&mp_N)); |
++ CHECK_MPI_OK(mp_init(&mp_g)); |
++ CHECK_MPI_OK(mp_init(&mp_u)); |
++ CHECK_MPI_OK(mp_init(&mp_k)); |
++ CHECK_MPI_OK(mp_init(&mp_v)); |
++ CHECK_MPI_OK(mp_init(&mp_b)); |
++ CHECK_MPI_OK(mp_init(&mp_A)); |
++ CHECK_MPI_OK(mp_init(&mp_res)); |
++ CHECK_MPI_OK(mp_init(&mp_pms)); |
++ |
++ zero = PORT_ZAlloc(len); |
++ it_u = SECITEM_AllocItem(NULL, NULL, SHA1_LENGTH); |
++ it_k = SECITEM_AllocItem(NULL, NULL, SHA1_LENGTH); |
++ |
++ if (!zero || !it_u || !it_k) { |
++ PORT_SetError(SEC_ERROR_NO_MEMORY); |
++ goto cleanup; |
++ } |
++ |
++ /* u = SHA1( PAD(A) | PAD(B) ) */ |
++ SHA1_Begin(ctx); |
++ SHA1_Update(ctx, zero, len - srp->ppub.len); |
++ SHA1_Update(ctx, srp->ppub.data, srp->ppub.len); |
++ SHA1_Update(ctx, zero, len - prvKey->pubKey.len); |
++ SHA1_Update(ctx, prvKey->pubKey.data, prvKey->pubKey.len); |
++ SHA1_End(ctx, it_u->data, &it_u->len, SHA1_LENGTH); |
++ |
++ /* k = SHA1( N | PAD(g) ) */ |
++ SHA1_Begin(ctx); |
++ SHA1_Update(ctx, srp->N.data, srp->N.len); |
++ SHA1_Update(ctx, zero, len - srp->g.len); |
++ SHA1_Update(ctx, srp->g.data, srp->g.len); |
++ SHA1_End(ctx, it_k->data, &it_k->len, SHA1_LENGTH); |
++ |
++ /* |
++ * calculate pms = (A * v^u) ^ b % N |
++ */ |
++ |
++ SECITEM_TO_MPINT(*it_u, &mp_u); |
++ SECITEM_TO_MPINT(*it_k, &mp_k); |
++ SECITEM_TO_MPINT(srp->N, &mp_N); |
++ SECITEM_TO_MPINT(srp->g, &mp_g); |
++ SECITEM_TO_MPINT(srp->ppub,&mp_A); |
++ SECITEM_TO_MPINT(prvKey->secret, &mp_v); |
++ SECITEM_TO_MPINT(prvKey->prvKey, &mp_b); |
++ |
++ CHECK_MPI_OK(mp_exptmod(&mp_v, &mp_u, &mp_N, &mp_res)); |
++ CHECK_MPI_OK(mp_mulmod(&mp_A, &mp_res, &mp_N, &mp_res)); |
++ CHECK_MPI_OK(mp_exptmod(&mp_res, &mp_b, &mp_N, &mp_pms)); |
++ |
++ MPINT_TO_SECITEM(&mp_pms, pms, NULL); |
++ |
++ rv = SECSuccess; |
++cleanup: |
++ PORT_Free(zero); |
++ SECITEM_FreeItem(it_u, PR_TRUE); |
++ SECITEM_FreeItem(it_k, PR_TRUE); |
++ SHA1_DestroyContext(ctx, PR_TRUE); |
++ mp_clear(&mp_N); |
++ mp_clear(&mp_g); |
++ mp_clear(&mp_b); |
++ mp_clear(&mp_A); |
++ mp_clear(&mp_k); |
++ mp_clear(&mp_u); |
++ mp_clear(&mp_v); |
++ mp_clear(&mp_pms); |
++ mp_clear(&mp_res); |
++ if (err) { |
++ MP_TO_SEC_ERROR(err); |
++ rv = SECFailure; |
++ } |
++ return rv; |
++} |
++ |
++/* SRP_ClientDerive computes the common key 'pms'
++ * |
++ * The pre-master secret is calculated as follows: |
++ * |
++ * u = SHA1(PAD(A) | PAD(B)) |
++ * k = SHA1(N | PAD(g)) |
++ * x = SHA1(s | SHA1(I | ":" | P)) |
++ * pms = (B - (k * g^x)) ^ (a + (u * x)) % N |
++ * |
++ * PAD() left-pads with \0 up to the length of N
++ */ |
++SECStatus SRP_ClientDerive(SRPPrivateKey *prvKey, SRPDeriveParams *srp, |
++ SECItem * pms) { |
++ |
++    /* local mp_int temporaries */
++ unsigned char *zero = NULL; |
++ mp_int mp_pms, mp_res1, mp_res2; |
++ mp_int mp_B, mp_a, mp_A; |
++ mp_int mp_N, mp_g, mp_u; |
++ mp_int mp_k, mp_x; |
++ mp_err err = MP_OKAY; |
++ SECItem *it_u = NULL; |
++ SECItem *it_k = NULL; |
++ SECItem *it_x = NULL; |
++ SHA1Context *ctx = SHA1_NewContext(); |
++ unsigned int len = srp->N.len; |
++ SECStatus rv = SECFailure; |
++ |
++ if (prvKey->secret.len == 0) { |
++	/* XXX this error is probably meant for token passwords;
++	 * we use it here to signal a missing password in bypass mode */
++ PORT_SetError(SEC_ERROR_BAD_PASSWORD); |
++ return SECFailure; |
++ } |
++ |
++ CHECK_MPI_OK(mp_init(&mp_N)); |
++ CHECK_MPI_OK(mp_init(&mp_g)); |
++ CHECK_MPI_OK(mp_init(&mp_u)); |
++ CHECK_MPI_OK(mp_init(&mp_k)); |
++ CHECK_MPI_OK(mp_init(&mp_x)); |
++ CHECK_MPI_OK(mp_init(&mp_A)); |
++ CHECK_MPI_OK(mp_init(&mp_a)); |
++ CHECK_MPI_OK(mp_init(&mp_B)); |
++ CHECK_MPI_OK(mp_init(&mp_res1)); |
++ CHECK_MPI_OK(mp_init(&mp_res2)); |
++ CHECK_MPI_OK(mp_init(&mp_pms)); |
++ |
++ /* check server-supplied parameters */ |
++ SECITEM_TO_MPINT(srp->N, &mp_N); |
++ SECITEM_TO_MPINT(srp->g, &mp_g); |
++ SECITEM_TO_MPINT(srp->ppub,&mp_B); |
++ |
++ CHECK_SEC_OK(srp_backdoor_check(&mp_N, &mp_B)); |
++ |
++ /* |
++ * create hashed variables u, k, x |
++ */ |
++ |
++ zero = PORT_ZAlloc(len); |
++ it_u = SECITEM_AllocItem(NULL, NULL, SHA1_LENGTH); |
++ it_k = SECITEM_AllocItem(NULL, NULL, SHA1_LENGTH); |
++ it_x = SECITEM_AllocItem(NULL, NULL, SHA1_LENGTH); |
++ |
++ if (!zero || !it_u || !it_k || !it_x) { |
++ PORT_SetError(SEC_ERROR_NO_MEMORY); |
++ goto cleanup; |
++ } |
++ |
++ /* u = SHA1( PAD(A) | PAD(B) ) */ |
++ SHA1_Begin(ctx); |
++ SHA1_Update(ctx, zero, len - prvKey->pubKey.len); |
++ SHA1_Update(ctx, prvKey->pubKey.data, prvKey->pubKey.len); |
++ SHA1_Update(ctx, zero, len - srp->ppub.len); |
++ SHA1_Update(ctx, srp->ppub.data, srp->ppub.len); |
++ SHA1_End(ctx, it_u->data, &it_u->len, SHA1_LENGTH); |
++ |
++ /* k = SHA1( N | PAD(g) ) */ |
++ SHA1_Begin(ctx); |
++ SHA1_Update(ctx, srp->N.data, srp->N.len); |
++ SHA1_Update(ctx, zero, len - srp->g.len); |
++ SHA1_Update(ctx, srp->g.data, srp->g.len); |
++ SHA1_End(ctx, it_k->data, &it_k->len, SHA1_LENGTH); |
++ |
++ /* x = SHA1(s | SHA1(I | ":" | P)) */ |
++ SHA1_Begin(ctx); |
++ SHA1_Update(ctx, srp->u.data, srp->u.len); |
++ SHA1_Update(ctx,(unsigned char *)":",1); |
++ SHA1_Update(ctx, prvKey->secret.data, prvKey->secret.len); |
++ SHA1_End(ctx, it_x->data, &it_x->len, SHA1_LENGTH); |
++ |
++ SHA1_Begin(ctx); |
++ SHA1_Update(ctx, srp->s.data, srp->s.len); |
++ SHA1_Update(ctx, it_x->data, it_x->len); |
++ SHA1_End(ctx, it_x->data, &it_x->len, SHA1_LENGTH); |
++ |
++ /* |
++ * compute pms = (B - (k * g^x)) ^ (a + (u * x)) % N |
++ */ |
++ |
++ SECITEM_TO_MPINT(*it_u, &mp_u); |
++ SECITEM_TO_MPINT(*it_k, &mp_k); |
++ SECITEM_TO_MPINT(*it_x, &mp_x); |
++ SECITEM_TO_MPINT(prvKey->prvKey, &mp_a); |
++ |
++ CHECK_MPI_OK(mp_exptmod(&mp_g,&mp_x,&mp_N,&mp_res2)); |
++ CHECK_MPI_OK(mp_mulmod(&mp_res2,&mp_k,&mp_N,&mp_res2)); |
++ CHECK_MPI_OK(mp_submod(&mp_B,&mp_res2,&mp_N,&mp_res2)); |
++ CHECK_MPI_OK(mp_mul(&mp_u, &mp_x, &mp_res1)); |
++ CHECK_MPI_OK(mp_add(&mp_res1,&mp_a,&mp_res1)); |
++ CHECK_MPI_OK(mp_exptmod(&mp_res2,&mp_res1,&mp_N,&mp_pms)); |
++ |
++ MPINT_TO_SECITEM(&mp_pms, pms, NULL); |
++ rv = SECSuccess; |
++cleanup: |
++ PORT_Free(zero); |
++ SECITEM_FreeItem(it_u, PR_TRUE); |
++ SECITEM_FreeItem(it_k, PR_TRUE); |
++ SECITEM_FreeItem(it_x, PR_TRUE); |
++ SHA1_DestroyContext(ctx, PR_TRUE); |
++ mp_clear(&mp_N); |
++ mp_clear(&mp_g); |
++ mp_clear(&mp_a); |
++ mp_clear(&mp_A); |
++ mp_clear(&mp_B); |
++ mp_clear(&mp_k); |
++ mp_clear(&mp_u); |
++ mp_clear(&mp_x); |
++ mp_clear(&mp_pms); |
++ mp_clear(&mp_res1); |
++ mp_clear(&mp_res2); |
++ if (err) { |
++ MP_TO_SEC_ERROR(err); |
++ rv = SECFailure; |
++ } |
++ return rv; |
++} |
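++
++/* Why the client and server derivations agree (all arithmetic mod N):
++ * with v = g^x, A = g^a and B = k*v + g^b, the server computes
++ *   pms = (A * v^u)^b = g^(b*(a + u*x))
++ * while the client computes
++ *   pms = (B - k*g^x)^(a + u*x) = (g^b)^(a + u*x) = g^(b*(a + u*x)),
++ * so both ends arrive at the same pre-master secret.
++ */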
++ |
++ |
++/* SRP_NewServerKeyPair |
++ * creates a new srp key pair for the server |
++ * |
++ * k = SHA1(N | PAD(g)) |
++ * pubKey = k*v + g^prvKey % N |
++ */ |
++SECStatus SRP_NewServerKeyPair(SRPPrivateKey **prvKey, SRPKeyPairParams *srp) { |
++ |
++ mp_int mp_N, mp_g, mp_pub, mp_prv, mp_k, mp_v, mp_res; |
++ PRArenaPool *arena; |
++ SRPPrivateKey *key; |
++ SECItem *it_k; |
++ unsigned char *zero; |
++ mp_err err = MP_OKAY; |
++ SECStatus rv = SECFailure; |
++ SHA1Context *ctx = SHA1_NewContext(); |
++ |
++ |
++ if (!srp || !prvKey) { |
++ PORT_SetError(SEC_ERROR_INVALID_ARGS); |
++ return SECFailure; |
++ } |
++ arena = PORT_NewArena(NSS_FREEBL_DEFAULT_CHUNKSIZE); |
++ if (!arena) { |
++ PORT_SetError(SEC_ERROR_NO_MEMORY); |
++ return SECFailure; |
++ } |
++ key = (SRPPrivateKey *)PORT_ArenaZAlloc(arena, sizeof(SRPPrivateKey)); |
++ if (!key) { |
++ PORT_FreeArena(arena, PR_TRUE); |
++ PORT_SetError(SEC_ERROR_NO_MEMORY); |
++ return SECFailure; |
++ } |
++ key->arena = arena; |
++ |
++ /* prv=rand() */ |
++ SECITEM_AllocItem(arena, &key->prvKey, SRP_SECRET_KEY_LEN); |
++ rv = RNG_GenerateGlobalRandomBytes(key->prvKey.data, key->prvKey.len); |
++ |
++    if (rv != SECSuccess || !key->prvKey.data) {
++ PORT_SetError(SEC_ERROR_NO_MEMORY); |
++ PORT_FreeArena(arena, PR_TRUE); |
++ return SECFailure; |
++ } |
++ |
++ it_k = SECITEM_AllocItem(NULL, NULL, SHA1_LENGTH); |
++ zero = PORT_ZAlloc(srp->N.len); |
++ |
++ if (!zero || !it_k) { |
++ PORT_SetError(SEC_ERROR_NO_MEMORY); |
++ goto cleanup; |
++ } |
++ |
++ /* k = SHA1( N | PAD(g) ) */ |
++ SHA1_Begin(ctx); |
++ SHA1_Update(ctx, srp->N.data, srp->N.len); |
++ SHA1_Update(ctx, zero, srp->N.len - srp->g.len); |
++ SHA1_Update(ctx, srp->g.data, srp->g.len); |
++ SHA1_End(ctx, it_k->data, &it_k->len, SHA1_LENGTH); |
++ |
++ /* |
++ * create key pair |
++ */ |
++ CHECK_MPI_OK( mp_init(&mp_N) ); |
++ CHECK_MPI_OK( mp_init(&mp_g) ); |
++ CHECK_MPI_OK( mp_init(&mp_k) ); |
++ CHECK_MPI_OK( mp_init(&mp_v) ); |
++ CHECK_MPI_OK( mp_init(&mp_pub)); |
++ CHECK_MPI_OK( mp_init(&mp_prv)); |
++ CHECK_MPI_OK( mp_init(&mp_res)); |
++ SECITEM_TO_MPINT(*it_k, &mp_k); |
++ SECITEM_TO_MPINT(srp->N, &mp_N); |
++ SECITEM_TO_MPINT(srp->g, &mp_g); |
++ SECITEM_TO_MPINT(srp->secret, &mp_v); |
++ SECITEM_TO_MPINT(key->prvKey, &mp_prv); |
++ |
++ /* pub = k*v + g^prv % N */ |
++ CHECK_MPI_OK(mp_exptmod(&mp_g, &mp_prv, &mp_N, &mp_pub)); |
++ CHECK_MPI_OK(mp_mulmod(&mp_k, &mp_v, &mp_N, &mp_res)); |
++ CHECK_MPI_OK(mp_addmod(&mp_res, &mp_pub, &mp_N, &mp_pub)); |
++ |
++ MPINT_TO_SECITEM(&mp_pub, &key->pubKey, arena); |
++ CHECK_SEC_OK(SECITEM_CopyItem(arena, &key->secret, &srp->secret)); |
++ *prvKey = key; |
++ |
++cleanup: |
++ PORT_Free(zero); |
++ SECITEM_FreeItem(it_k,PR_TRUE); |
++ SHA1_DestroyContext(ctx, PR_TRUE); |
++ mp_clear(&mp_N); |
++ mp_clear(&mp_g); |
++ mp_clear(&mp_k); |
++ mp_clear(&mp_v); |
++ mp_clear(&mp_pub); |
++ mp_clear(&mp_prv); |
++ mp_clear(&mp_res); |
++ if (err) { |
++ PORT_FreeArena(arena, PR_TRUE); /* not zeroized!! */ |
++ MP_TO_SEC_ERROR(err); |
++ rv = SECFailure; |
++ } |
++ return rv; |
++} |
++ |
++/* SRP_NewClientKeyPair |
++ * creates a new srp key pair for the client |
++ * |
++ * prv = rand() |
++ * pub = g^prv % N, with prv drawn from at least 256 bits of randomness
++ * prvKey->secret = srp->secret |
++ */ |
++ |
++SECStatus SRP_NewClientKeyPair(SRPPrivateKey **prvKey, SRPKeyPairParams *srp) { |
++ |
++ |
++ SRPPrivateKey *key; |
++ PRArenaPool *arena; |
++ mp_int mp_N, mp_g, mp_prv, mp_pub; |
++ mp_err err = MP_OKAY; |
++ SECStatus rv = SECFailure; |
++ |
++ if (!srp || !prvKey) { |
++ PORT_SetError(SEC_ERROR_INVALID_ARGS); |
++ return SECFailure; |
++ } |
++ |
++ arena = PORT_NewArena(NSS_FREEBL_DEFAULT_CHUNKSIZE); |
++ if (!arena) { |
++ PORT_SetError(SEC_ERROR_NO_MEMORY); |
++ return SECFailure; |
++ } |
++ |
++ key = (SRPPrivateKey *)PORT_ArenaZAlloc(arena, sizeof(SRPPrivateKey)); |
++ if (!key) { |
++ PORT_SetError(SEC_ERROR_NO_MEMORY); |
++ PORT_FreeArena(arena, PR_TRUE); |
++ return SECFailure; |
++ } |
++ key->arena = arena; |
++ |
++ /* prv=rand() */ |
++ SECITEM_AllocItem(arena, &key->prvKey, SRP_SECRET_KEY_LEN); |
++ rv = RNG_GenerateGlobalRandomBytes(key->prvKey.data, key->prvKey.len); |
++ |
++    if (rv != SECSuccess || !key->prvKey.data) {
++ PORT_SetError(SEC_ERROR_NO_MEMORY); |
++ PORT_FreeArena(arena, PR_TRUE); |
++ return SECFailure; |
++ } |
++ |
++ /* pub = g^prv % N */ |
++ CHECK_MPI_OK( mp_init(&mp_N) ); |
++ CHECK_MPI_OK( mp_init(&mp_g) ); |
++ CHECK_MPI_OK( mp_init(&mp_pub)); |
++ CHECK_MPI_OK( mp_init(&mp_prv)); |
++ SECITEM_TO_MPINT(srp->N, &mp_N); |
++ SECITEM_TO_MPINT(srp->g, &mp_g); |
++ SECITEM_TO_MPINT(key->prvKey, &mp_prv); |
++ |
++ if (SECSuccess != check_srp_group(&mp_N, &mp_g)) |
++ goto cleanup; |
++ |
++ CHECK_MPI_OK( mp_exptmod(&mp_g, &mp_prv, &mp_N, &mp_pub) ); |
++ |
++ MPINT_TO_SECITEM(&mp_pub, &key->pubKey, key->arena); |
++ CHECK_SEC_OK( SECITEM_CopyItem(arena, &key->secret, &srp->secret) ); |
++ *prvKey = key; |
++ |
++cleanup: |
++ mp_clear(&mp_g); |
++ mp_clear(&mp_N); |
++ mp_clear(&mp_pub); |
++ mp_clear(&mp_prv); |
++ if (err) { |
++ PORT_FreeArena(arena, PR_TRUE); /* not zeroized!! */ |
++ MP_TO_SEC_ERROR(err); |
++ rv = SECFailure; |
++ } |
++ return rv; |
++} |
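++
++/* Note on the 'secret' fields used above (per RFC 5054): on the client side
++ * 'secret' carries the password P, from which SRP_ClientDerive computes
++ * x = SHA1(s | SHA1(I | ":" | P)); on the server side 'secret' is expected
++ * to already hold the verifier v = g^x % N computed when the user was
++ * enrolled, which is what SRP_NewServerKeyPair and SRP_ServerDerive use.
++ */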
++ |
+diff --git a/net/third_party/nss/ssl/srp_groups.h b/net/third_party/nss/ssl/srp_groups.h |
+new file mode 100644 |
+index 0000000..e327a0f |
+--- /dev/null |
++++ b/net/third_party/nss/ssl/srp_groups.h |
+@@ -0,0 +1,58 @@ |
++/* ***** BEGIN LICENSE BLOCK ***** |
++ * Version: MPL 1.1/GPL 2.0/LGPL 2.1 |
++ * |
++ * The contents of this file are subject to the Mozilla Public License Version |
++ * 1.1 (the "License"); you may not use this file except in compliance with |
++ * the License. You may obtain a copy of the License at |
++ * http://www.mozilla.org/MPL/ |
++ * |
++ * Software distributed under the License is distributed on an "AS IS" basis, |
++ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License |
++ * for the specific language governing rights and limitations under the |
++ * License. |
++ * |
++ * The Initial Developer of the Original Code is |
++ * Steffen Schulz - pepe (at) cbg.dyndns.org |
++ * |
++ * Portions created by the Initial Developer are Copyright (C) 2007 |
++ * the Initial Developer. All Rights Reserved. |
++ * |
++ * Contributor(s): |
++ * |
++ * Alternatively, the contents of this file may be used under the terms of |
++ * either the GNU General Public License Version 2 or later (the "GPL"), or |
++ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), |
++ * in which case the provisions of the GPL or the LGPL are applicable instead |
++ * of those above. If you wish to allow use of your version of this file only |
++ * under the terms of either the GPL or the LGPL, and not to allow others to |
++ * use your version of this file under the terms of the MPL, indicate your |
++ * decision by deleting the provisions above and replace them with the notice |
++ * and other provisions required by the GPL or the LGPL. If you do not delete |
++ * the provisions above, a recipient may use your version of this file under |
++ * the terms of any one of the MPL, the GPL or the LGPL. |
++ * |
++ * ***** END LICENSE BLOCK ***** */ |
++ |
++/* number of known groups */ |
++#define SRP_KNOWN_GROUPS 7 |
++ |
++/* Whitelist of known-good group parameters, taken from RFC 5054. The client |
++ * checks supplied params against this whitelist. There is currently no support |
++ * for application specified group parameters. |
++ */ |
++ |
++struct srp_group { |
++ char *modulus; |
++ char *generator; |
++}; |
++ |
++const struct srp_group known_srp_groups[SRP_KNOWN_GROUPS] = { |
++ { "EEAF0AB9ADB38DD69C33F80AFA8FC5E86072618775FF3C0B9EA2314C9C256576D674DF7496EA81D3383B4813D692C6E0E0D5D8E250B98BE48E495C1D6089DAD15DC7D7B46154D6B6CE8EF4AD69B15D4982559B297BCF1885C529F566660E57EC68EDBC3C05726CC02FD4CBF4976EAA9AFD5138FE8376435B9FC61D2FC0EB06E3", "2"}, |
++ { "9DEF3CAFB939277AB1F12A8617A47BBBDBA51DF499AC4C80BEEEA9614B19CC4D5F4F5F556E27CBDE51C6A94BE4607A291558903BA0D0F84380B655BB9A22E8DCDF028A7CEC67F0D08134B1C8B97989149B609E0BE3BAB63D47548381DBC5B1FC764E3F4B53DD9DA1158BFD3E2B9C8CF56EDF019539349627DB2FD53D24B7C48665772E437D6C7F8CE442734AF7CCB7AE837C264AE3A9BEB87F8A2FE9B8B5292E5A021FFF5E91479E8CE7A28C2442C6F315180F93499A234DCF76E3FED135F9BB", "2"}, |
++ { "AC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73", "2"}, |
++ { "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF", "5"}, |
++ { "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF", "5",}, |
++ { "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF", "5"}, |
++ { "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD922222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC50846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6FC026E479558E4475677E9AA9E3050E2765694DFC81F56E880B96E7160C980DD98EDD3DFFFFFFFFFFFFFFFFF", "13"} |
++}; |
++ |
+diff --git a/net/third_party/nss/ssl/ssl.def b/net/third_party/nss/ssl/ssl.def |
+index 76417d0..0af96ba 100644 |
+--- a/net/third_party/nss/ssl/ssl.def |
++++ b/net/third_party/nss/ssl/ssl.def |
+@@ -136,6 +136,10 @@ SSL_ReHandshakeWithTimeout; |
+ ;+NSS_3.11.8 { # NSS 3.11.8 release |
+ ;+ global: |
+ SSL_CanBypass; |
++ |
++SSL_SetUserLogin; |
++SSL_UserPasswdHook; |
++SSL_GetSRPParamsHook; |
+ ;+ local: |
+ ;+*; |
+ ;+}; |
+diff --git a/net/third_party/nss/ssl/ssl.h b/net/third_party/nss/ssl/ssl.h |
+index 21d7c8d..8a8d53f 100644 |
+--- a/net/third_party/nss/ssl/ssl.h |
++++ b/net/third_party/nss/ssl/ssl.h |
+@@ -437,6 +437,41 @@ SSL_IMPORT PRFileDesc *SSL_ReconfigFD(PRFileDesc *model, PRFileDesc *fd); |
+ */ |
+ SSL_IMPORT SECStatus SSL_SetPKCS11PinArg(PRFileDesc *fd, void *a); |
+ |
++ |
++/* |
++ * Set the client side user name and password non-interactively. |
++ */ |
++SSL_IMPORT SECStatus SSL_SetUserLogin(PRFileDesc *fd, char *u, char *p); |
++ |
++/* |
++ * This sets the client side callback for SSL to retrieve the user password. |
++ * fd - the file descriptor for the connection in question |
++ * func - callback function pointer |
++ * pw - user password |
++ */ |
++ |
++typedef SECStatus (PR_CALLBACK *SSLUserPasswdCB)(PRFileDesc *fd, |
++ SECItem *pw, void *arg); |
++ |
++SSL_IMPORT SECStatus SSL_UserPasswdHook(PRFileDesc *fd, SSLUserPasswdCB func, |
++ void *arg); |
++ |
++/* |
++ * This sets the server side callback function for SSL to retrieve the SRP |
++ * authentication parameters associated with a specific user login. |
++ * fd - the file descriptor of the connection |
++ * func - pointer to the callback function |
++ * arg  - application data passed to the callback
++ * The callback fills in 'srp' with the SRP authentication parameters the
++ * application has stored for the user being authenticated.
++ */ |
++ |
++typedef SECStatus (PR_CALLBACK *SSLGetSRPParamsCB)(PRFileDesc *fd, |
++ SECKEYSRPParams *srp, |
++ void *arg); |
++ |
++SSL_IMPORT SECStatus SSL_GetSRPParamsHook(PRFileDesc *fd, |
++ SSLGetSRPParamsCB func, void *arg); |
++ |
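++/*
++ * Minimal client-side sketch (illustrative; error handling omitted and the
++ * socket, user name and password are placeholders, not part of this API):
++ *
++ *   PRFileDesc *fd = ...;                // an SSL-enabled socket
++ *   SSL_SetUserLogin(fd, "alice", "s3cret");
++ *
++ * A server instead registers an SSLGetSRPParamsCB via SSL_GetSRPParamsHook()
++ * to supply the stored SRP parameters for the authenticating user.
++ */
++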
+ /* |
+ ** This is a callback for dealing with server certs that are not authenticated |
+ ** by the client. The client app can decide that it actually likes the |
+diff --git a/net/third_party/nss/ssl/ssl3con.c b/net/third_party/nss/ssl/ssl3con.c |
+index f5c0880..8f1f9e4 100644 |
+--- a/net/third_party/nss/ssl/ssl3con.c |
++++ b/net/third_party/nss/ssl/ssl3con.c |
+@@ -118,6 +118,9 @@ static ssl3CipherSuiteCfg cipherSuites[ssl_V3_SUITES_IMPLEMENTED] = { |
+ #endif /* NSS_ENABLE_ECC */ |
+ { TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, SSL_NOT_ALLOWED, PR_FALSE,PR_FALSE}, |
+ { TLS_RSA_WITH_AES_256_CBC_SHA, SSL_NOT_ALLOWED, PR_FALSE,PR_FALSE}, |
++ { TLS_SRP_SHA_WITH_AES_256_CBC_SHA, SSL_NOT_ALLOWED, PR_FALSE,PR_FALSE}, |
++ { TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, SSL_NOT_ALLOWED, PR_FALSE,PR_FALSE}, |
++ { TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, SSL_NOT_ALLOWED, PR_FALSE,PR_FALSE}, |
+ |
+ #ifdef NSS_ENABLE_ECC |
+ { TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, SSL_NOT_ALLOWED, PR_FALSE,PR_FALSE}, |
+@@ -141,11 +144,15 @@ static ssl3CipherSuiteCfg cipherSuites[ssl_V3_SUITES_IMPLEMENTED] = { |
+ { SSL_RSA_WITH_RC4_128_MD5, SSL_NOT_ALLOWED, PR_TRUE, PR_FALSE}, |
+ { SSL_RSA_WITH_RC4_128_SHA, SSL_NOT_ALLOWED, PR_FALSE,PR_FALSE}, |
+ { TLS_RSA_WITH_AES_128_CBC_SHA, SSL_NOT_ALLOWED, PR_FALSE,PR_FALSE}, |
++ { TLS_SRP_SHA_WITH_AES_128_CBC_SHA, SSL_NOT_ALLOWED, PR_FALSE,PR_FALSE}, |
++ { TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, SSL_NOT_ALLOWED, PR_FALSE,PR_FALSE}, |
++ { TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, SSL_NOT_ALLOWED, PR_FALSE,PR_FALSE}, |
+ |
+ #ifdef NSS_ENABLE_ECC |
+ { TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA, SSL_NOT_ALLOWED, PR_FALSE,PR_FALSE}, |
+ { TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, SSL_NOT_ALLOWED, PR_FALSE,PR_FALSE}, |
+ #endif /* NSS_ENABLE_ECC */ |
++ { TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, SSL_NOT_ALLOWED, PR_FALSE,PR_FALSE}, |
+ { SSL_DHE_RSA_WITH_3DES_EDE_CBC_SHA, SSL_NOT_ALLOWED, PR_FALSE,PR_FALSE}, |
+ { SSL_DHE_DSS_WITH_3DES_EDE_CBC_SHA, SSL_NOT_ALLOWED, PR_FALSE,PR_FALSE}, |
+ #ifdef NSS_ENABLE_ECC |
+@@ -154,6 +161,8 @@ static ssl3CipherSuiteCfg cipherSuites[ssl_V3_SUITES_IMPLEMENTED] = { |
+ #endif /* NSS_ENABLE_ECC */ |
+ { SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA, SSL_NOT_ALLOWED, PR_TRUE, PR_FALSE}, |
+ { SSL_RSA_WITH_3DES_EDE_CBC_SHA, SSL_NOT_ALLOWED, PR_TRUE, PR_FALSE}, |
++ { TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, SSL_NOT_ALLOWED, PR_FALSE,PR_FALSE}, |
++ { TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, SSL_NOT_ALLOWED, PR_FALSE,PR_FALSE}, |
+ |
+ |
+ { SSL_DHE_RSA_WITH_DES_CBC_SHA, SSL_NOT_ALLOWED, PR_FALSE,PR_FALSE}, |
+@@ -283,6 +292,9 @@ static const ssl3KEADef kea_defs[] = |
+ {kea_dh_anon, kt_dh, sign_null, PR_FALSE, 0, PR_FALSE}, |
+ {kea_dh_anon_export, kt_dh, sign_null, PR_TRUE, 512, PR_FALSE}, |
+ {kea_rsa_fips, kt_rsa, sign_rsa, PR_FALSE, 0, PR_TRUE }, |
++ {kea_srp, kt_srp, sign_null, PR_FALSE, 0, PR_FALSE}, |
++ {kea_srp_rsa, kt_srp, sign_rsa, PR_FALSE, 0, PR_FALSE}, |
++ {kea_srp_dss, kt_srp, sign_dsa, PR_FALSE, 0, PR_FALSE}, |
+ #ifdef NSS_ENABLE_ECC |
+ {kea_ecdh_ecdsa, kt_ecdh, sign_ecdsa, PR_FALSE, 0, PR_FALSE}, |
+ {kea_ecdhe_ecdsa, kt_ecdh, sign_ecdsa, PR_FALSE, 0, PR_FALSE}, |
+@@ -344,6 +356,21 @@ static const ssl3CipherSuiteDef cipher_suite_defs[] = |
+ |
+ |
+ /* New TLS cipher suites */ |
++ {TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, cipher_3des, mac_sha, kea_srp}, |
++ {TLS_SRP_SHA_WITH_AES_128_CBC_SHA, cipher_aes_128, mac_sha, kea_srp}, |
++ {TLS_SRP_SHA_WITH_AES_256_CBC_SHA, cipher_aes_256, mac_sha, kea_srp}, |
++ {TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, |
++ cipher_3des, mac_sha, kea_srp_rsa}, |
++ {TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, |
++ cipher_3des, mac_sha, kea_srp_dss}, |
++ {TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, |
++ cipher_aes_128, mac_sha, kea_srp_rsa}, |
++ {TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, |
++ cipher_aes_128, mac_sha, kea_srp_dss}, |
++ {TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, |
++ cipher_aes_256, mac_sha, kea_srp_rsa}, |
++ {TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, |
++ cipher_aes_256, mac_sha, kea_srp_dss}, |
+ {TLS_RSA_WITH_AES_128_CBC_SHA, cipher_aes_128, mac_sha, kea_rsa}, |
+ {TLS_DHE_DSS_WITH_AES_128_CBC_SHA, cipher_aes_128, mac_sha, kea_dhe_dss}, |
+ {TLS_DHE_RSA_WITH_AES_128_CBC_SHA, cipher_aes_128, mac_sha, kea_dhe_rsa}, |
+@@ -420,7 +447,8 @@ static const CK_MECHANISM_TYPE kea_alg_defs[] = { |
+ CKM_RSA_PKCS, |
+ CKM_DH_PKCS_DERIVE, |
+ CKM_KEA_KEY_DERIVE, |
+- CKM_ECDH1_DERIVE |
++ CKM_ECDH1_DERIVE, |
++ CKM_NSS_SRP_DERIVE |
+ }; |
+ |
+ typedef struct SSLCipher2MechStr { |
+@@ -695,12 +723,27 @@ ssl3_config_match_init(sslSocket *ss) |
+ } |
+ #endif /* NSS_ENABLE_ECC */ |
+ |
++ /* XXX this should be merged with switch(kea) from above */ |
++ switch (cipher_def->key_exchange_alg) { |
++ case kea_srp_rsa: |
++ svrAuth = ss->serverCerts + kt_rsa; |
++ break; |
++ case kea_srp_dss: |
++	    svrAuth = ss->serverCerts + kt_null; /* DSS server certs are stored in the kt_null slot */
++ break; |
++ default: |
++ svrAuth = ss->serverCerts + exchKeyType; |
++ break; |
++ } |
++ |
++ |
+ /* Mark the suites that are backed by real tokens, certs and keys */ |
+ suite->isPresent = (PRBool) |
+ (((exchKeyType == kt_null) || |
+ ((!isServer || (svrAuth->serverKeyPair && |
+ svrAuth->SERVERKEY && |
+- svrAuth->serverCertChain)) && |
++ svrAuth->serverCertChain) || |
++ cipher_def->key_exchange_alg == kea_srp) && |
+ PK11_TokenExists(kea_alg_defs[exchKeyType]))) && |
+ ((cipher_alg == calg_null) || PK11_TokenExists(cipher_mech))); |
+ if (suite->isPresent) |
+@@ -1080,6 +1123,57 @@ ssl3_ComputeExportRSAKeyHash(SECItem modulus, SECItem publicExponent, |
+ return rv; |
+ } |
+ |
++/* Caller must set hiLevel error code. |
++ * Called from ssl3_SendSRPServerKeyExchange and ssl3_HandleSRPServerKeyExchange. */
++static SECStatus |
++ssl3_ComputeSRPKeyHash(SECItem *N, SECItem *g, SECItem *s, SECItem *B, |
++ SSL3Random *client_rand, SSL3Random *server_rand, |
++ SSL3Hashes *hashes, PRBool bypassPKCS11) |
++{ |
++ PRUint8 * hashBuf; |
++ PRUint8 * pBuf; |
++ SECStatus rv = SECFailure; |
++ unsigned int bufLen; |
++ |
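++    /* Hash input layout (the ServerSRPParams as sent on the wire, prefixed
++     * by both hello randoms):
++     *   client_random || server_random ||
++     *   len2(N) || N || len2(g) || g || len1(s) || s || len2(B) || B */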
++ bufLen = 2*SSL3_RANDOM_LENGTH + N->len + 2 + g->len + 2 |
++ + s->len + 1 + B->len + 2; |
++ |
++ hashBuf = PORT_Alloc(bufLen); |
++ if (!hashBuf) { |
++ return SECFailure; |
++ } |
++ |
++ memcpy(hashBuf, client_rand, SSL3_RANDOM_LENGTH); |
++ pBuf = hashBuf + SSL3_RANDOM_LENGTH; |
++ memcpy(pBuf, server_rand, SSL3_RANDOM_LENGTH); |
++ pBuf += SSL3_RANDOM_LENGTH; |
++ pBuf[0] = (PRUint8)(N->len >> 8); |
++ pBuf[1] = (PRUint8)(N->len); |
++ pBuf+=2; |
++ memcpy(pBuf, N->data, N->len); |
++ pBuf += N->len; |
++ pBuf[0] = (PRUint8)(g->len >> 8); |
++ pBuf[1] = (PRUint8)(g->len); |
++ pBuf+=2; |
++ memcpy(pBuf, g->data, g->len); |
++ pBuf += g->len; |
++ pBuf[0] = (PRUint8)(s->len); |
++ pBuf+=1; |
++ memcpy(pBuf, s->data, s->len); |
++ pBuf += s->len; |
++ pBuf[0] = (PRUint8)(B->len >> 8); |
++ pBuf[1] = (PRUint8)(B->len); |
++ pBuf+=2; |
++ memcpy(pBuf, B->data, B->len); |
++ pBuf += B->len; |
++ |
++ rv = ssl3_ComputeCommonKeyHash(hashBuf, bufLen, hashes, bypassPKCS11); |
++ |
++ if (hashBuf) |
++ PORT_Free(hashBuf); |
++ return rv; |
++} |
++ |
+ /* Caller must set hiLevel error code. */ |
+ /* Called from ssl3_HandleServerKeyExchange. */ |
+ static SECStatus |
+@@ -2663,6 +2757,8 @@ ssl3_HandleAlert(sslSocket *ss, sslBuffer *buf) |
+ error = SSL_ERROR_BAD_CERT_STATUS_RESPONSE_ALERT; break; |
+ case bad_certificate_hash_value: |
+ error = SSL_ERROR_BAD_CERT_HASH_VALUE_ALERT; break; |
++ case unknown_psk_identity: |
++ error = SSL_ERROR_UNKNOWN_PSK_IDENTITY_ALERT; break; |
+ default: error = SSL_ERROR_RX_UNKNOWN_ALERT; break; |
+ } |
+ if (level == alert_fatal) { |
+@@ -2828,7 +2924,8 @@ ssl3_DeriveMasterSecret(sslSocket *ss, PK11SymKey *pms) |
+ * data into a 48-byte value. |
+ */ |
+ PRBool isDH = (PRBool) ((ss->ssl3.hs.kea_def->exchKeyType == kt_dh) || |
+- (ss->ssl3.hs.kea_def->exchKeyType == kt_ecdh)); |
++ (ss->ssl3.hs.kea_def->exchKeyType == kt_ecdh) || |
++ (ss->ssl3.hs.kea_def->exchKeyType == kt_srp)); |
+ SECStatus rv = SECFailure; |
+ CK_MECHANISM_TYPE master_derive; |
+ CK_MECHANISM_TYPE key_derive; |
+@@ -4733,8 +4830,242 @@ loser: |
+ return rv; |
+ } |
+ |
++/* Read srp values from datastream and verify the signature |
++ * if required by the cipher suite. Save parameters to ss->sec.peerKey.
++ * |
++ * called from ssl3_HandleServerKeyExchange |
++ */ |
++static SECStatus |
++ssl3_HandleSRPServerKeyExchange(sslSocket *ss, SSL3Opaque *b, |
++ PRUint32 length) { |
++ |
++ SECItem signature = {siBuffer, NULL, 0}; |
++ PRArenaPool *arena = NULL; |
++ SECKEYPublicKey *peerKey = NULL; |
++ SECStatus rv; |
++ SSL3Hashes hashes; |
++ SECItem srp_N, srp_g, srp_s, srp_ppub; |
++    int              errCode = SSL_ERROR_RX_MALFORMED_SERVER_KEY_EXCH;
++ |
++ rv = ssl3_ConsumeHandshakeVariable(ss, &srp_N, 2, &b, &length); |
++ if (rv != SECSuccess) { |
++ goto loser; /* malformed. */ |
++ } |
++ rv = ssl3_ConsumeHandshakeVariable(ss, &srp_g, 2, &b, &length); |
++ if (rv != SECSuccess) { |
++ goto loser; /* malformed. */ |
++ } |
++ rv = ssl3_ConsumeHandshakeVariable(ss, &srp_s, 1, &b, &length); |
++ if (rv != SECSuccess) { |
++ goto loser; /* malformed. */ |
++ } |
++ rv = ssl3_ConsumeHandshakeVariable(ss, &srp_ppub, 2, &b, &length); |
++ if (rv != SECSuccess) { |
++ goto loser; /* malformed. */ |
++ } |
++ |
++ if (ss->ssl3.hs.kea_def->kea != kea_srp) { /* there MUST be a signature */ |
++ rv = ssl3_ConsumeHandshakeVariable(ss, &signature, 2, &b, &length); |
++ if (rv != SECSuccess) { |
++ goto loser; /* malformed. */ |
++ } |
++ rv = ssl3_ComputeSRPKeyHash(&srp_N, &srp_g, &srp_s, &srp_ppub, |
++ &ss->ssl3.hs.client_random, |
++ &ss->ssl3.hs.server_random, |
++ &hashes, ss->opt.bypassPKCS11); |
++ if (rv != SECSuccess) { |
++ errCode = ssl_MapLowLevelError(SSL_ERROR_SERVER_KEY_EXCHANGE_FAILURE); |
++ goto alert_loser; |
++ } |
++ rv = ssl3_VerifySignedHashes(&hashes, ss->sec.peerCert, &signature, |
++ PR_TRUE, ss->pkcs11PinArg); |
++ if (rv != SECSuccess) { |
++ errCode = ssl_MapLowLevelError(SSL_ERROR_SERVER_KEY_EXCHANGE_FAILURE); |
++ goto alert_loser; |
++ } |
++ } |
++ |
++ /* all ok, save and return */ |
++ arena = PORT_NewArena(DER_DEFAULT_CHUNKSIZE); |
++ if (arena == NULL) { |
++ return SECFailure; |
++ } |
++ ss->sec.peerKey = peerKey = PORT_ArenaZNew(arena, SECKEYPublicKey); |
++ if (peerKey == NULL) { |
++ return SECFailure; |
++ } |
++ peerKey->arena = arena; |
++ peerKey->keyType = srpKey; |
++ peerKey->pkcs11Slot = NULL; |
++ peerKey->pkcs11ID = CK_INVALID_HANDLE; |
++ |
++ if (SECITEM_CopyItem(arena, &peerKey->u.srp.N, &srp_N) || |
++ SECITEM_CopyItem(arena, &peerKey->u.srp.g, &srp_g) || |
++ SECITEM_CopyItem(arena, &peerKey->u.srp.s, &srp_s) || |
++ SECITEM_CopyItem(arena, &peerKey->u.srp.ppub, &srp_ppub)) { |
++ return SECFailure; |
++ } |
++ return SECSuccess; |
++ |
++alert_loser: |
++ (void)SSL3_SendAlert(ss, alert_fatal, illegal_parameter); |
++loser: |
++ PORT_SetError(errCode); |
++ return SECFailure; |
++} |
++ |
++/* Calculate ClientKeyExchange and Pre-Master-Secret via SRP_GenKeys(), |
++ * then send ClientKeyExchange and derive SSL master key |
++ * |
++ * called from ssl3_SendClientKeyExchange() |
++ */ |
++static SECStatus |
++ssl3_SendSRPClientKeyExchange(sslSocket *ss, SECKEYPublicKey * pubKey) { |
++ |
++ SECKEYSRPParams *srpParam; |
++    SECStatus rv = SECFailure;
++ |
++ PORT_Assert( ss->opt.noLocks || ssl_HaveSSL3HandshakeLock(ss) ); |
++ PORT_Assert( ss->opt.noLocks || ssl_HaveXmitBufLock(ss)); |
++ |
++ srpParam = PORT_ZAlloc(sizeof(SECKEYSRPParams)); |
++ if (!srpParam) { |
++ ssl_MapLowLevelError(SSL_ERROR_SERVER_KEY_EXCHANGE_FAILURE); |
++ goto loser; |
++ } |
++ |
++ /* PW-Callback overrides SSL_SetUserLogin. If both fail to |
++ * provide a password, the token must know it or fail. */ |
++ if (ss->getUserPasswd) { |
++ if (!ss->sec.userPasswd) |
++ ss->sec.userPasswd = SECITEM_AllocItem(NULL,NULL,0); |
++ SECITEM_FreeItem(ss->sec.userPasswd, PR_FALSE); |
++ ss->getUserPasswd(ss->fd, ss->sec.userPasswd, ss->getUserPasswdArg); |
++ } |
++ if (ss->sec.userPasswd) { |
++ srpParam->secret.data = ss->sec.userPasswd->data; |
++ srpParam->secret.len = ss->sec.userPasswd->len; |
++ ss->sec.userPasswd = NULL; |
++ } |
+ |
++ /* calculate client key pair and PMS, then send key exchange data */ |
++ if (ss->opt.bypassPKCS11) { |
++ SECItem pms = {0, NULL, 0}; |
++ SRPPrivateKey *prvKey; |
++ SRPKeyPairParams keyPairParam; |
++ keyPairParam.N.data = pubKey->u.srp.N.data; |
++ keyPairParam.N.len = pubKey->u.srp.N.len; |
++ keyPairParam.g.data = pubKey->u.srp.g.data; |
++ keyPairParam.g.len = pubKey->u.srp.g.len; |
++ keyPairParam.secret.data = srpParam->secret.data; |
++ keyPairParam.secret.len = srpParam->secret.len; |
++ |
++ rv = SRP_NewClientKeyPair(&prvKey, &keyPairParam); |
++	if (rv != SECSuccess) goto loser;	/* err set by SRP_NewClientKeyPair */
++ |
++ SRPDeriveParams deriveParam; |
++ deriveParam.N.data = pubKey->u.srp.N.data; |
++ deriveParam.N.len = pubKey->u.srp.N.len; |
++ deriveParam.g.data = pubKey->u.srp.g.data; |
++ deriveParam.g.len = pubKey->u.srp.g.len; |
++ deriveParam.s.data = pubKey->u.srp.s.data; |
++ deriveParam.s.len = pubKey->u.srp.s.len; |
++ deriveParam.u.data = ss->sec.userName->data; |
++ deriveParam.u.len = ss->sec.userName->len; |
++ deriveParam.ppub.data= pubKey->u.srp.ppub.data; |
++ deriveParam.ppub.len = pubKey->u.srp.ppub.len; |
++ |
++ |
++ if (SECSuccess != SRP_ClientDerive(prvKey, &deriveParam, &pms)) { |
++ goto derive_fail; |
++ } |
++ |
++ /* client key exchange data */ |
++ rv = ssl3_AppendHandshakeHeader(ss, client_key_exchange, |
++ prvKey->pubKey.len + 2); |
++ if (rv != SECSuccess) goto loser; /* err set by ssl3_AppendHandshake* */ |
++ rv = ssl3_AppendHandshakeVariable(ss, prvKey->pubKey.data, |
++ prvKey->pubKey.len, 2); |
++ if (rv != SECSuccess) goto loser; /* err set by ssl3_AppendHandshake* */ |
++ |
++ /* init pending cipher spec*/ |
++ rv = ssl3_MasterKeyDeriveBypass(ss->ssl3.pwSpec, |
++ (unsigned char *)&ss->ssl3.hs.client_random, |
++ (unsigned char *)&ss->ssl3.hs.server_random, |
++ &pms, PR_TRUE, PR_FALSE); |
++ if (rv != SECSuccess) { |
++ ss->ssl3.pwSpec->msItem.data = ss->ssl3.pwSpec->raw_master_secret; |
++ ss->ssl3.pwSpec->msItem.len = SSL3_MASTER_SECRET_LENGTH; |
++ PK11_GenerateRandom(ss->ssl3.pwSpec->msItem.data, |
++ SSL3_MASTER_SECRET_LENGTH); |
++ } |
++ rv = ssl3_InitPendingCipherSpec(ss, NULL); |
++ |
++	SECITEM_ZfreeItem(&pms, PR_FALSE); /* zeroize the PMS, as in the server path */
++ PORT_FreeArena(prvKey->arena, PR_TRUE); |
++ } else { /* PK11 path */ |
++ PK11SymKey *pms = NULL; |
++ SECKEYPrivateKey *prvKey = NULL; |
++ SECKEYPublicKey *newPub = NULL; |
++ |
++ srpParam->N.data = pubKey->u.srp.N.data; |
++ srpParam->N.len = pubKey->u.srp.N.len; |
++ srpParam->g.data = pubKey->u.srp.g.data; |
++ srpParam->g.len = pubKey->u.srp.g.len; |
++ srpParam->s.data = pubKey->u.srp.s.data; |
++ srpParam->s.len = pubKey->u.srp.s.len; |
++ srpParam->u.data = ss->sec.userName->data; |
++ srpParam->u.len = ss->sec.userName->len; |
++ |
++	/* The token handles any info missing from srpParam.
++	 * The template is not actually involved in key generation,
++	 * but it is important in the server key exchange. */
++ |
++ prvKey = SECKEY_CreateSRPPrivateKey(srpParam, &newPub, PR_FALSE, NULL); |
++ if (!prvKey) { |
++ ssl_MapLowLevelError(SEC_ERROR_KEYGEN_FAIL); |
++ rv = SECFailure; |
++ goto loser; |
++ } |
++ SECITEM_CopyItem(newPub->arena, &newPub->u.srp.ppub, &pubKey->u.srp.ppub); |
++ |
++ /* Now all data is in newPub and prvKey, compute pms with them */ |
++ pms = PK11_PubDerive(prvKey, newPub, PR_FALSE, NULL, NULL, |
++ CKM_NSS_SRP_DERIVE, CKM_TLS_MASTER_KEY_DERIVE, CKF_DERIVE, 0, NULL); |
+ |
++ if (!pms) { |
++ goto derive_fail; |
++ } |
++ |
++ /* init pending cipher spec*/ |
++ rv = ssl3_InitPendingCipherSpec(ss, pms); |
++ |
++ |
++ /* client key exchange data */ |
++ rv = ssl3_AppendHandshakeHeader(ss, client_key_exchange, |
++ newPub->u.srp.pub.len + 2); |
++ if (rv != SECSuccess) goto loser; /* err set by ssl3_AppendHandshake* */ |
++ rv = ssl3_AppendHandshakeVariable(ss, newPub->u.srp.pub.data, |
++ newPub->u.srp.pub.len, 2); |
++ if (rv != SECSuccess) goto loser; /* err set by ssl3_AppendHandshake* */ |
++ |
++ if (pms) PK11_FreeSymKey(pms); |
++ SECKEY_DestroyPublicKey(newPub); |
++ } /* end of PK11 path */ |
++ |
++loser: |
++ SECITEM_FreeItem(ss->sec.userName, PR_TRUE); |
++ SECITEM_ZfreeItem(ss->sec.userPasswd, PR_TRUE); |
++ PORT_Free(srpParam); |
++ /* caller frees pubKey */ |
++ return rv; |
++derive_fail: |
++ if (PORT_GetError() == SEC_ERROR_SRP_UNSUPPORTED_GROUP) |
++ SSL3_SendAlert(ss, alert_fatal, insufficient_security); |
++ if (PORT_GetError() == SEC_ERROR_SRP_ILLEGAL_PARAMETER) |
++ SSL3_SendAlert(ss, alert_fatal, illegal_parameter); |
++ return SECFailure; |
++} |
+ |
+ |
+ /* Called from ssl3_HandleServerHelloDone(). */ |
+@@ -4794,7 +5125,9 @@ ssl3_SendClientKeyExchange(sslSocket *ss) |
+ rv = ssl3_SendECDHClientKeyExchange(ss, serverKey); |
+ break; |
+ #endif /* NSS_ENABLE_ECC */ |
+- |
++ case kt_srp: |
++ rv = ssl3_SendSRPClientKeyExchange(ss, serverKey); |
++ break; |
+ default: |
+ /* got an unknown or unsupported Key Exchange Algorithm. */ |
+ SEND_ALERT |
+@@ -5284,7 +5617,8 @@ ssl3_HandleServerKeyExchange(sslSocket *ss, SSL3Opaque *b, PRUint32 length) |
+ desc = unexpected_message; |
+ goto alert_loser; |
+ } |
+- if (ss->sec.peerCert == NULL) { |
++ if (ss->sec.peerCert == NULL && |
++ ss->ssl3.hs.suite_def->key_exchange_alg != kea_srp) { |
+ errCode = SSL_ERROR_RX_UNEXPECTED_SERVER_KEY_EXCH; |
+ desc = unexpected_message; |
+ goto alert_loser; |
+@@ -5473,6 +5807,13 @@ ssl3_HandleServerKeyExchange(sslSocket *ss, SSL3Opaque *b, PRUint32 length) |
+ rv = ssl3_HandleECDHServerKeyExchange(ss, b, length); |
+ return rv; |
+ #endif /* NSS_ENABLE_ECC */ |
++ case kt_srp: |
++ rv = ssl3_HandleSRPServerKeyExchange(ss, b, length); |
++ if (rv != SECSuccess) { |
++ errCode = ssl_MapLowLevelError(SSL_ERROR_SERVER_KEY_EXCHANGE_FAILURE); |
++ goto alert_loser; |
++ } |
++ return rv; |
+ |
+ default: |
+ desc = handshake_failure; |
+@@ -6034,16 +6375,20 @@ ssl3_SendServerHelloSequence(sslSocket *ss) |
+ if (rv != SECSuccess) { |
+ return rv; /* err code is set. */ |
+ } |
+- rv = ssl3_SendCertificate(ss); |
+- if (rv != SECSuccess) { |
+- return rv; /* error code is set. */ |
+- } |
+ /* We have to do this after the call to ssl3_SendServerHello, |
+ * because kea_def is set up by ssl3_SendServerHello(). |
+ */ |
+ kea_def = ss->ssl3.hs.kea_def; |
+ ss->ssl3.hs.usedStepDownKey = PR_FALSE; |
+ |
++ |
++    if (kea_def->kea != kea_srp) {	/* plain-SRP suites send no certificate */
++ rv = ssl3_SendCertificate(ss); |
++ if (rv != SECSuccess) { |
++ return rv; /* error code is set. */ |
++ } |
++ } |
++ |
+ if (kea_def->is_limited && kea_def->exchKeyType == kt_rsa) { |
+ /* see if we can legally use the key in the cert. */ |
+ int keyLen; /* bytes */ |
+@@ -6075,6 +6420,11 @@ ssl3_SendServerHelloSequence(sslSocket *ss) |
+ return rv; /* err code was set. */ |
+ } |
+ #endif /* NSS_ENABLE_ECC */ |
++ } else if ( kea_def->exchKeyType == kt_srp ) { |
++ rv = ssl3_SendServerKeyExchange(ss); |
++ if (rv != SECSuccess) { |
++ return rv; /* err code was set. */ |
++ } |
+ } |
+ |
+ if (ss->opt.requestCertificate) { |
+@@ -7099,6 +7449,196 @@ ssl3_SendServerHello(sslSocket *ss) |
+ return SECSuccess; |
+ } |
+ |
++/* ssl3_SendSRPServerKeyExchange() |
++ * called by ssl3_SendServerKeyExchange() |
++ * |
++ * - make sure we got a userid in the srp client hello extension |
++ * - retrieve verifier and parameters for the user via callback func |
++ * - if the user is nonexistent, the callback may make something up
++ * - continue by creating and sending the SRP key exchange data: |
++ * |
++ * N, g, s, v = <read from password file> |
++ * b = random() |
++ * k = SHA1(N | PAD(g)) |
++ * B = k*v + g^b % N |
++ * send (N,g,s,B) |
++ * |
++ * save values b,v,N for calculation of pms in ssl3_HandleSRPClientKeyExchange |
++ */ |
++ |
++SECStatus |
++ssl3_SendSRPServerKeyExchange(sslSocket *ss) { |
++ |
++ int bytes = 0; |
++ const ssl3KEADef *kea_def = ss->ssl3.hs.kea_def; |
++ SECItem signed_hash = {siBuffer, NULL, 0}; |
++ SECStatus rv = SECFailure; |
++ SECKEYSRPPublicKey *srp = NULL; |
++ SECKEYPublicKey *pubKey = NULL; |
++ SECKEYPrivateKey *prvKey = NULL; |
++ SECKEYSRPParams *srpParams; |
++ SSL3Hashes hashes; |
++ |
++ /* send error if no userid was supplied in Client Hello */ |
++ if (!ss->sec.userName || !ss->sec.userName->data) |
++ goto unknown_id; |
++ |
++ /* Ask application for SRP parameters for specified username. |
++ * Information provided via callback overrides data set on token. |
++ * If no params provided, the token must supply them or fail. |
++ * Callback may fail for a nonexistent user.
++ */ |
++ |
++ srpParams = PORT_ZAlloc(sizeof(SECKEYSRPParams)); |
++ if (!srpParams) goto no_memory; |
++ |
++ srpParams->u.data = ss->sec.userName->data; |
++ srpParams->u.len = ss->sec.userName->len; |
++ |
++ if (ss->getSRPParams) { |
++ rv = ss->getSRPParams(ss->fd, srpParams, ss->getSRPParamsArg); |
++ if (rv != SECSuccess) { |
++ SECITEM_FreeItem(&srpParams->N, PR_FALSE); |
++ SECITEM_FreeItem(&srpParams->g, PR_FALSE); |
++ SECITEM_FreeItem(&srpParams->s, PR_FALSE); |
++ SECITEM_ZfreeItem(&srpParams->secret, PR_FALSE); |
++ PORT_Free(srpParams); |
++ goto unknown_id; |
++ } |
++ } |
++ |
++ /* create SRP server key pair */ |
++ if (ss->opt.bypassPKCS11) { |
++	/* srpParams, keyPairParams are temporary. pubKey and prvKey have their
++ * own arenas and are saved for ssl3_HandleSRPClientKeyExchange */ |
++ SRPPrivateKey *srpPrv; |
++ SRPKeyPairParams keyPairParams; |
++ |
++ PRArenaPool *arena = PORT_NewArena(DER_DEFAULT_CHUNKSIZE); |
++ if (!arena) goto no_memory; |
++ |
++ keyPairParams.N.data = srpParams->N.data; |
++ keyPairParams.N.len = srpParams->N.len; |
++ keyPairParams.g.data = srpParams->g.data; |
++ keyPairParams.g.len = srpParams->g.len; |
++ keyPairParams.secret.data = srpParams->secret.data; |
++ keyPairParams.secret.len = srpParams->secret.len; |
++ |
++ rv = SRP_NewServerKeyPair(&srpPrv, &keyPairParams); |
++ if (rv != SECSuccess) { |
++ ssl_MapLowLevelError(SEC_ERROR_KEYGEN_FAIL); |
++ return rv; |
++ } |
++ prvKey = (SECKEYPrivateKey *)srpPrv; |
++ |
++ /* create pubKey from temporary stuff */ |
++ pubKey = PORT_ArenaZAlloc(arena, sizeof(SECKEYPublicKey)); |
++ if (!pubKey) goto no_memory; |
++ pubKey->arena = arena; |
++ srp = &pubKey->u.srp; |
++ |
++ SECITEM_CopyItem(arena, &srp->N, &srpParams->N); |
++ SECITEM_CopyItem(arena, &srp->g, &srpParams->g); |
++ SECITEM_CopyItem(arena, &srp->s, &srpParams->s); |
++ SECITEM_CopyItem(arena, &srp->u, &srpParams->u); |
++ SECITEM_CopyItem(arena, &srp->pub, &srpPrv->pubKey); |
++ |
++ } else { |
++ |
++ /* input: srpParams, output: prvKey = b,B,v, pubKey = N,g,s,u,B */ |
++ prvKey = SECKEY_CreateSRPPrivateKey(srpParams, &pubKey, PR_TRUE, NULL); |
++ if (!prvKey) { |
++ ssl_MapLowLevelError(SEC_ERROR_KEYGEN_FAIL); |
++ rv = SECFailure; |
++ goto cleanup; |
++ } |
++ srp = &pubKey->u.srp; |
++ } |
++ |
++ /* send N,g,s,B as ServerKeyExchange to Client */ |
++ /* optionally include signature for additional DSS/RSA auth */ |
++ |
++ if (kea_def->kea != kea_srp) { /* we need a RSA/DSA signature */ |
++ rv = ssl3_ComputeSRPKeyHash(&srp->N, &srp->g, &srp->s, &srp->pub, |
++ &ss->ssl3.hs.client_random, |
++ &ss->ssl3.hs.server_random, |
++ &hashes, ss->opt.bypassPKCS11); |
++ if (rv != SECSuccess) { |
++ ssl_MapLowLevelError(SSL_ERROR_SERVER_KEY_EXCHANGE_FAILURE); |
++ goto loser; |
++ } |
++ /* look if we have a certificate for selected algo */ |
++ if (kea_def->kea == kea_srp_rsa) |
++ bytes = kt_rsa; |
++ else |
++ bytes = kt_null; |
++ |
++	if (!ss->serverCerts[bytes].SERVERKEY) {
++ /* ciphersuite signing algo does not match supplied certificate */ |
++ PORT_SetError(SSL_ERROR_CERT_KEA_MISMATCH); |
++ return SECFailure; |
++ } |
++ rv = ssl3_SignHashes(&hashes, ss->serverCerts[bytes].SERVERKEY, |
++ &signed_hash, PR_TRUE); |
++ bytes = 2 + signed_hash.len; |
++ } |
++ |
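++    /* 7 = length-prefix bytes: 2 (N) + 2 (g) + 1 (s) + 2 (B) */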
++ bytes += srp->N.len + srp->g.len + srp->s.len + srp->pub.len + 7; |
++ |
++ rv = ssl3_AppendHandshakeHeader(ss, server_key_exchange, bytes); |
++ if (rv != SECSuccess) |
++ return rv; /* err set by AppendHandshake. */ |
++ |
++ rv = ssl3_AppendHandshakeVariable(ss, srp->N.data, srp->N.len, 2); |
++ if (rv != SECSuccess) |
++ return rv; /* err set by AppendHandshake. */ |
++ |
++ rv = ssl3_AppendHandshakeVariable(ss, srp->g.data, srp->g.len, 2); |
++ if (rv != SECSuccess) |
++ return rv; /* err set by AppendHandshake. */ |
++ |
++ rv = ssl3_AppendHandshakeVariable(ss, srp->s.data, srp->s.len, 1); |
++ if (rv != SECSuccess) |
++ return rv; /* err set by AppendHandshake. */ |
++ |
++ rv = ssl3_AppendHandshakeVariable(ss, srp->pub.data, srp->pub.len, 2); |
++ if (rv != SECSuccess) |
++ return rv; /* err set by AppendHandshake. */ |
++ |
++ if (kea_def->kea != kea_srp) { |
++ rv = ssl3_AppendHandshakeVariable(ss, signed_hash.data, |
++ signed_hash.len, 2); |
++ if (rv != SECSuccess) { |
++ return rv; /* err set by AppendHandshake. */ |
++ } |
++ SECITEM_FreeItem(&signed_hash, PR_FALSE); |
++ } |
++ |
++    /* save prvKey / pubKey for use in ssl3_HandleSRPClientKeyExchange.
++     * XXX in bypassPKCS11, prvKey is not a PK11 object and must be cast. */
++ ssl3KeyPair *srpPair = ssl3_NewKeyPair(prvKey, pubKey); |
++ ss->serverCerts[kt_srp].serverKeyPair = srpPair; |
++ |
++cleanup: |
++ SECITEM_FreeItem(&srpParams->N, PR_FALSE); |
++ SECITEM_FreeItem(&srpParams->g, PR_FALSE); |
++ SECITEM_FreeItem(&srpParams->s, PR_FALSE); |
++ SECITEM_ZfreeItem(&srpParams->secret, PR_FALSE); |
++ SECITEM_FreeItem(ss->sec.userName, PR_TRUE); |
++ if (srpParams) PORT_Free(srpParams); |
++ return rv; |
++loser: |
++ PORT_SetError(SSL_ERROR_INTERNAL_ERROR_ALERT); |
++ (void)SSL3_SendAlert(ss, alert_fatal, internal_error); |
++ return SECFailure; |
++unknown_id: |
++ PORT_SetError(SSL_ERROR_UNKNOWN_PSK_IDENTITY_ALERT); |
++ (void)SSL3_SendAlert(ss, alert_fatal, unknown_psk_identity); |
++ return SECFailure; |
++no_memory: |
++ ssl_MapLowLevelError(SSL_ERROR_SERVER_KEY_EXCHANGE_FAILURE); |
++ return SECFailure; |
++} |
+ |
+ static SECStatus |
+ ssl3_SendServerKeyExchange(sslSocket *ss) |
+@@ -7183,7 +7723,9 @@ const ssl3KEADef * kea_def = ss->ssl3.hs.kea_def; |
+ return rv; |
+ } |
+ #endif /* NSS_ENABLE_ECC */ |
+- |
++ case kt_srp: |
++ rv = ssl3_SendSRPServerKeyExchange(ss); |
++ return rv; |
+ case kt_dh: |
+ case kt_null: |
+ default: |
+@@ -7536,6 +8078,101 @@ double_bypass: |
+ return SECSuccess; |
+ } |
+ |
++/* |
++ * extract SRP value A from ClientKeyExchange |
++ * calculate pre-master-secret and init cipher specs |
++ * |
++ * called by ssl3_HandleClientKeyExchange |
++ */ |
++SECStatus |
++ssl3_HandleSRPClientKeyExchange(sslSocket *ss, SSL3Opaque *b, |
++ PRUint32 length) { |
++ |
++ SECItem ppub; /* peers public key ('A') */ |
++ sslServerCerts sc; |
++ SECStatus rv = SECFailure; |
++ SECKEYPublicKey *pubKey = NULL; |
++ |
++ |
++ PORT_Assert( ss->opt.noLocks || ssl_HaveRecvBufLock(ss) ); |
++ PORT_Assert( ss->opt.noLocks || ssl_HaveSSL3HandshakeLock(ss) ); |
++ |
++ rv = ssl3_ConsumeHandshakeVariable(ss, &ppub, 2, &b, &length); |
++ if (rv != SECSuccess) { |
++ PORT_SetError(SSL_ERROR_CLIENT_KEY_EXCHANGE_FAILURE); |
++ return SECFailure; |
++ } |
++ |
++ sc = ss->serverCerts[kt_srp]; |
++ pubKey = sc.serverKeyPair->pubKey; |
++ |
++ SECITEM_CopyItem(pubKey->arena, &pubKey->u.srp.ppub, &ppub); |
++ |
++ if (ss->opt.bypassPKCS11) { |
++ SRPPrivateKey *prvKey = NULL; |
++ SECItem pms = { 0, NULL, 0 }; |
++ SRPDeriveParams param; |
++ |
++ prvKey = (SRPPrivateKey *)sc.serverKeyPair->privKey; |
++ |
++ param.N.data = pubKey->u.srp.N.data; |
++ param.N.len = pubKey->u.srp.N.len; |
++ param.g.data = pubKey->u.srp.g.data; |
++ param.g.len = pubKey->u.srp.g.len; |
++ param.ppub.data = pubKey->u.srp.ppub.data; |
++ param.ppub.len = pubKey->u.srp.ppub.len; |
++ |
++ if (SECSuccess != SRP_ServerDerive(prvKey, ¶m, &pms)) |
++ goto derive_fail; |
++ |
++ ssl_GetSpecWriteLock(ss); |
++ /* create MS out of MS, bypassing PKCS11 */ |
++ rv = ssl3_MasterKeyDeriveBypass(ss->ssl3.pwSpec, |
++ (unsigned char *)&ss->ssl3.hs.client_random, |
++ (unsigned char *)&ss->ssl3.hs.server_random, |
++ &pms, PR_TRUE, PR_FALSE); |
++ if (rv != SECSuccess) { |
++ ss->ssl3.pwSpec->msItem.data = ss->ssl3.pwSpec->raw_master_secret; |
++ ss->ssl3.pwSpec->msItem.len = SSL3_MASTER_SECRET_LENGTH; |
++ PK11_GenerateRandom(ss->ssl3.pwSpec->msItem.data, ss->ssl3.pwSpec->msItem.len); |
++ } |
++ |
++ rv = ssl3_InitPendingCipherSpec(ss, NULL); |
++ |
++ SECITEM_ZfreeItem(&pms, PR_FALSE); |
++ PORT_FreeArena(prvKey->arena, PR_TRUE); /* XXX FreeArena does not zeroize! */ |
++ sc.serverKeyPair->privKey = NULL; |
++ |
++ } else { |
++ SECKEYPrivateKey *prvKey = NULL; |
++ PK11SymKey *pms = NULL; /* pre-master secret */ |
++ |
++ prvKey = sc.serverKeyPair->privKey; |
++ |
++ /* Calculate PMS based on clntKey and public params */ |
++ pms = PK11_PubDerive(prvKey, pubKey, PR_TRUE, NULL, NULL, |
++ CKM_NSS_SRP_DERIVE, CKM_TLS_MASTER_KEY_DERIVE, CKF_DERIVE, 0, NULL); |
++ |
++ if (!pms) { |
++ goto derive_fail; |
++ } |
++ |
++ ssl_GetSpecWriteLock(ss); |
++ /* derive master secret from pms */ |
++ rv = ssl3_InitPendingCipherSpec(ss, pms); |
++ ssl_ReleaseSpecWriteLock(ss); |
++ |
++ PK11_FreeSymKey(pms); |
++ /*SECKEY_DestroyPrivateKey(prvKey);*/ |
++ } |
++ |
++ return rv; |
++derive_fail: |
++ if (PORT_GetError() == SEC_ERROR_SRP_ILLEGAL_PARAMETER) |
++ SSL3_SendAlert(ss, alert_fatal, illegal_parameter); |
++ return rv; |
++} |
++ |
+ |
+ /* Called from ssl3_HandleHandshakeMessage() when it has deciphered a complete |
+ * ssl3 ClientKeyExchange message from the remote client |
+@@ -7608,7 +8245,8 @@ skip: |
+ serverKey = serverKeyPair->privKey; |
+ } |
+ |
+- if (serverKey == NULL) { |
++ /* XXX hack, figure out this serverKey thing..*/ |
++ if (serverKey == NULL && kea_def->exchKeyType != kt_srp) { |
+ SEND_ALERT |
+ PORT_SetError(SSL_ERROR_NO_SERVER_KEY_FOR_ALG); |
+ return SECFailure; |
+@@ -7649,7 +8287,12 @@ skip: |
+ } |
+ break; |
+ #endif /* NSS_ENABLE_ECC */ |
+- |
++ case kt_srp: |
++ rv = ssl3_HandleSRPClientKeyExchange(ss, b, length); |
++ if (rv != SECSuccess) { |
++ return SECFailure; /* error code set */ |
++ } |
++ break; |
+ default: |
+ (void) ssl3_HandshakeFailure(ss); |
+ PORT_SetError(SEC_ERROR_UNSUPPORTED_KEYALG); |
+@@ -7823,8 +8466,12 @@ ssl3_SendCertificate(sslSocket *ss) |
+ * using EC certificates. |
+ */ |
+ if ((ss->ssl3.hs.kea_def->kea == kea_ecdhe_rsa) || |
+- (ss->ssl3.hs.kea_def->kea == kea_dhe_rsa)) { |
++ (ss->ssl3.hs.kea_def->kea == kea_dhe_rsa) || |
++ (ss->ssl3.hs.kea_def->kea == kea_srp_rsa)) { |
+ certIndex = kt_rsa; |
++    } else if (ss->ssl3.hs.kea_def->kea == kea_srp_dss) {
++ certIndex = kt_null; |
+ } else { |
+ certIndex = ss->ssl3.hs.kea_def->exchKeyType; |
+ } |
+@@ -8244,7 +8891,9 @@ cert_block: |
+ ss->ssl3.hs.kea_def->kea == kea_ecdhe_ecdsa || |
+ ss->ssl3.hs.kea_def->kea == kea_ecdhe_rsa || |
+ #endif /* NSS_ENABLE_ECC */ |
+- ss->ssl3.hs.kea_def->exchKeyType == kt_dh) { |
++ ss->ssl3.hs.kea_def->exchKeyType == kt_dh || |
++ ss->ssl3.hs.kea_def->kea == kea_srp_dss || |
++ ss->ssl3.hs.kea_def->kea == kea_srp_rsa) { |
+ ss->ssl3.hs.ws = wait_server_key; /* allow server_key_exchange */ |
+ } |
+ } |
+diff --git a/net/third_party/nss/ssl/ssl3ecc.c b/net/third_party/nss/ssl/ssl3ecc.c |
+index 778c7ab..b899038 100644 |
+--- a/net/third_party/nss/ssl/ssl3ecc.c |
++++ b/net/third_party/nss/ssl/ssl3ecc.c |
+@@ -1191,3 +1191,60 @@ loser: |
+ } |
+ |
+ #endif /* NSS_ENABLE_ECC */ |
++ |
++/* send the SRP user-mapping hello extension using ss->sec.userName
++ * called from ssl3_CallHelloExtensionSenders */ |
++PRInt32 |
++ssl3_SendSRPHelloExtension(sslSocket * ss, PRBool append, |
++ PRUint32 maxBytes) |
++{ |
++ SECItem * user = ss->sec.userName; |
++ |
++ if (user == NULL) |
++ return 0; /* no credentials, no extension */ |
++ |
++ if (append && maxBytes >= user->len + 5) { |
++ SECStatus rv; |
++        /* extension_type 12 (srp) */
++ rv = ssl3_AppendHandshakeNumber(ss, 12, 2); |
++ if (rv != SECSuccess) return 0; |
++ /* length of extension */ |
++ rv = ssl3_AppendHandshakeNumber(ss, user->len + 1, 2); |
++ if (rv != SECSuccess) return 0; |
++ /* length of data */ |
++ rv = ssl3_AppendHandshakeNumber(ss, user->len, 1); |
++ if (rv != SECSuccess) return 0; |
++ /* extension_data = srp user name */ |
++ rv = ssl3_AppendHandshake(ss, user->data, user->len); |
++ if (rv != SECSuccess) return 0; |
++ } |
++ return user->len+5; |
++} |
++ |
++SECStatus |
++ssl3_HandleSRPHelloExtension(sslSocket *ss, PRUint16 ext, SECItem *data) |
++{ |
++ SECStatus rv; |
++ SECItem username; |
++ |
++ rv = ssl3_ConsumeHandshakeVariable(ss, &username, 1, &data->data, &data->len); |
++ if (rv != SECSuccess) |
++ return rv; |
++ |
++    /* enforce the SRP username length constraint */
++    if (username.len > MAX_SRP_USERNAME_LENGTH)
++        username.len = MAX_SRP_USERNAME_LENGTH;
++ |
++ ss->sec.userName = PORT_ZAlloc(sizeof(SECItem)); |
++ if (!ss->sec.userName) |
++ goto no_memory; |
++ |
++ rv = SECITEM_CopyItem(NULL, ss->sec.userName, &username); |
++ if (rv != SECSuccess) |
++ goto no_memory; |
++ |
++ return rv; |
++no_memory: |
++ ssl_MapLowLevelError(SSL_ERROR_SERVER_KEY_EXCHANGE_FAILURE); |
++ return SECFailure; |
++} |
+diff --git a/net/third_party/nss/ssl/ssl3ext.c b/net/third_party/nss/ssl/ssl3ext.c |
+index b93671e..c2a27b4 100644 |
+--- a/net/third_party/nss/ssl/ssl3ext.c |
++++ b/net/third_party/nss/ssl/ssl3ext.c |
+@@ -78,6 +78,11 @@ static PRInt32 ssl3_SendRenegotiationInfoXtn(sslSocket * ss, |
+ PRBool append, PRUint32 maxBytes); |
+ static SECStatus ssl3_HandleRenegotiationInfoXtn(sslSocket *ss, |
+ PRUint16 ex_type, SECItem *data); |
++static SECStatus ssl3_HandleSRPHelloXtn(sslSocket *ss, PRUint16 ext, |
++ SECItem *data); |
++PRInt32 ssl3_SendSRPHelloXtn(sslSocket * ss, PRBool append, |
++ PRUint32 maxBytes); |
++ |
+ |
+ /* |
+ * Write bytes. Using this function means the SECItem structure |
+@@ -254,6 +259,7 @@ static const ssl3HelloExtensionHandler serverHelloHandlersTLS[] = { |
+ |
+ static const ssl3HelloExtensionHandler serverHelloHandlersSSL3[] = { |
+ { ssl_renegotiation_info_xtn, &ssl3_HandleRenegotiationInfoXtn }, |
++ { ssl_srp_hello_xtn, &ssl3_HandleSRPHelloXtn }, |
+ { -1, NULL } |
+ }; |
+ |
+@@ -272,6 +278,7 @@ ssl3HelloExtensionSender clientHelloSendersTLS[SSL_MAX_EXTENSIONS] = { |
+ { ssl_ec_point_formats_xtn, &ssl3_SendSupportedPointFormatsXtn }, |
+ #endif |
+ { ssl_session_ticket_xtn, &ssl3_SendSessionTicketXtn }, |
++ { ssl_srp_hello_xtn, &ssl3_SendSRPHelloXtn }, |
+ { ssl_next_proto_neg_xtn, &ssl3_ClientSendNextProtoNegoXtn }, |
+ { ssl_cert_status_xtn, &ssl3_ClientSendStatusRequestXtn }, |
+ { ssl_snap_start_xtn, &ssl3_SendSnapStartXtn } |
+@@ -1720,3 +1727,59 @@ ssl3_HandleRenegotiationInfoXtn(sslSocket *ss, PRUint16 ex_type, SECItem *data) |
+ return rv; |
+ } |
+ |
++/* send the SRP user-mapping hello extension using ss->sec.userName
++ * called from ssl3_CallHelloExtensionSenders */ |
++PRInt32 |
++ssl3_SendSRPHelloXtn(sslSocket * ss, PRBool append, |
++ PRUint32 maxBytes) |
++{ |
++ SECItem * user = ss->sec.userName; |
++ |
++ if (user == NULL) |
++ return 0; /* no credentials, no extension */ |
++ |
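++    /* Extension wire format: extension_type(2) | extension_length(2) |
++     * srp_username_length(1) | srp_username, i.e. user->len + 5 bytes. */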
++ if (append && maxBytes >= user->len + 5) { |
++ SECStatus rv; |
++        /* extension_type 12 (srp) */
++ rv = ssl3_AppendHandshakeNumber(ss, 12, 2); |
++ if (rv != SECSuccess) return 0; |
++ /* length of extension */ |
++ rv = ssl3_AppendHandshakeNumber(ss, user->len + 1, 2); |
++ if (rv != SECSuccess) return 0; |
++ /* length of data */ |
++ rv = ssl3_AppendHandshakeNumber(ss, user->len, 1); |
++ if (rv != SECSuccess) return 0; |
++ /* extension_data = srp user name */ |
++ rv = ssl3_AppendHandshake(ss, user->data, user->len); |
++ if (rv != SECSuccess) return 0; |
++ } |
++ return user->len+5; |
++} |
++ |
++SECStatus |
++ssl3_HandleSRPHelloXtn(sslSocket *ss, PRUint16 ext, SECItem *data) |
++{ |
++ SECStatus rv; |
++ SECItem username; |
++ |
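++    /* extension_data is a 1-byte-length-prefixed srp username
++     * (opaque srp<1..2^8-1>, as in the TLS-SRP extension). */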
++ rv = ssl3_ConsumeHandshakeVariable(ss, &username, 1, &data->data, &data->len); |
++ if (rv != SECSuccess) |
++ return rv; |
++ |
++    /* enforce the SRP username length constraint */
++    if (username.len > MAX_SRP_USERNAME_LENGTH)
++        username.len = MAX_SRP_USERNAME_LENGTH;
++ |
++ ss->sec.userName = PORT_ZAlloc(sizeof(SECItem)); |
++ if (!ss->sec.userName) |
++ goto no_memory; |
++ |
++ rv = SECITEM_CopyItem(NULL, ss->sec.userName, &username); |
++ if (rv != SECSuccess) |
++ goto no_memory; |
++ |
++ return rv; |
++no_memory: |
++ ssl_MapLowLevelError(SSL_ERROR_SERVER_KEY_EXCHANGE_FAILURE); |
++ return SECFailure; |
++} |
+diff --git a/net/third_party/nss/ssl/ssl3prot.h b/net/third_party/nss/ssl/ssl3prot.h |
+index aeaacdd..a043577 100644 |
+--- a/net/third_party/nss/ssl/ssl3prot.h |
++++ b/net/third_party/nss/ssl/ssl3prot.h |
+@@ -63,6 +63,8 @@ typedef uint16 ssl3CipherSuite; |
+ |
+ #define MAX_FRAGMENT_LENGTH 16384 |
+ |
++#define MAX_SRP_USERNAME_LENGTH 255 |
++ |
+ typedef enum { |
+ content_change_cipher_spec = 20, |
+ content_alert = 21, |
+@@ -137,7 +139,9 @@ typedef enum { |
+ certificate_unobtainable = 111, |
+ unrecognized_name = 112, |
+ bad_certificate_status_response = 113, |
+- bad_certificate_hash_value = 114 |
++ bad_certificate_hash_value = 114, |
++ |
++ unknown_psk_identity = 115 |
+ |
+ } SSL3AlertDescription; |
+ |
+@@ -215,6 +219,9 @@ typedef enum { |
+ kea_dh_anon, |
+ kea_dh_anon_export, |
+ kea_rsa_fips, |
++ kea_srp, |
++ kea_srp_rsa, |
++ kea_srp_dss, |
+ kea_ecdh_ecdsa, |
+ kea_ecdhe_ecdsa, |
+ kea_ecdh_rsa, |
+diff --git a/net/third_party/nss/ssl/sslauth.c b/net/third_party/nss/ssl/sslauth.c |
+index 3f4924d..12c9a12 100644 |
+--- a/net/third_party/nss/ssl/sslauth.c |
++++ b/net/third_party/nss/ssl/sslauth.c |
+@@ -291,6 +291,80 @@ SSL_SetPKCS11PinArg(PRFileDesc *s, void *arg) |
+ return SECSuccess; |
+ } |
+ |
++/* register callback function to provide the user password */ |
++SECStatus |
++SSL_UserPasswdHook(PRFileDesc *s, SSLUserPasswdCB func, void *arg) |
++{ |
++ sslSocket *ss; |
++ |
++ ss = ssl_FindSocket(s); |
++ if (!ss) { |
++ SSL_DBG(("%d: SSL[%d]: bad socket in UserPasswdHook", |
++ SSL_GETPID(), s)); |
++ return SECFailure; |
++ } |
++ |
++ ss->getUserPasswd = func; |
++ ss->getUserPasswdArg = arg; |
++ return SECSuccess; |
++} |
++ |
++/* used by client to provide user credentials non-interactively */ |
++SECStatus |
++SSL_SetUserLogin(PRFileDesc *s, char *user, char *passwd) |
++{ |
++ sslSocket *ss = NULL; |
++ int len; |
++ |
++ ss = ssl_FindSocket(s); |
++ if (!ss) { |
++	SSL_DBG(("%d: SSL[%d]: bad socket in SetUserLogin",
++ SSL_GETPID(), s)); |
++ return SECFailure; |
++ } |
++ |
++ if (user) { |
++ len = PORT_Strlen(user); |
++ if (len > MAX_SRP_USERNAME_LENGTH) |
++ len = MAX_SRP_USERNAME_LENGTH; |
++ ss->sec.userName = SECITEM_AllocItem(NULL, NULL, len); |
++ if (!ss->sec.userName) { |
++ PORT_SetError(SEC_ERROR_NO_MEMORY); |
++ return SECFailure; |
++ } |
++ PORT_Memcpy(ss->sec.userName->data, user, ss->sec.userName->len); |
++ } |
++ |
++ if (passwd) { |
++ len = PORT_Strlen(passwd); |
++ ss->sec.userPasswd = SECITEM_AllocItem(NULL, NULL, len); |
++ if (!ss->sec.userPasswd) { |
++ PORT_SetError(SEC_ERROR_NO_MEMORY); |
++ return SECFailure; |
++ } |
++ PORT_Memcpy(ss->sec.userPasswd->data, passwd, ss->sec.userPasswd->len); |
++ } |
++ |
++ return SECSuccess; |
++} |
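++
++/* Example (illustrative sketch; "alice" and "secret" are placeholder
++ * credentials):
++ *
++ *    SSL_SetUserLogin(fd, "alice", "secret");
++ *
++ * Alternatively, register an SSLUserPasswdCB via SSL_UserPasswdHook() to
++ * supply the password lazily during the handshake; a password from that
++ * callback overrides one set here (see ssl3_SendSRPClientKeyExchange). */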
++ |
++/* register callback function to provide SRP user authentication params */ |
++SECStatus |
++SSL_GetSRPParamsHook(PRFileDesc *s, SSLGetSRPParamsCB func, void *arg) |
++{ |
++ sslSocket *ss; |
++ |
++ ss = ssl_FindSocket(s); |
++ if (!ss) { |
++	SSL_DBG(("%d: SSL[%d]: bad socket in GetSRPParamsHook",
++ SSL_GETPID(), s)); |
++ return SECFailure; |
++ } |
++ |
++ ss->getSRPParams = func; |
++ ss->getSRPParamsArg = arg; |
++ return SECSuccess; |
++} |
+ |
+ /* This is the "default" authCert callback function. It is called when a |
+ * certificate message is received from the peer and the local application |
+diff --git a/net/third_party/nss/ssl/sslenum.c b/net/third_party/nss/ssl/sslenum.c |
+index b8aa8cc..196ed30 100644 |
+--- a/net/third_party/nss/ssl/sslenum.c |
++++ b/net/third_party/nss/ssl/sslenum.c |
+@@ -74,6 +74,9 @@ const PRUint16 SSL_ImplementedCiphers[] = { |
+ #endif /* NSS_ENABLE_ECC */ |
+ TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, |
+ TLS_RSA_WITH_AES_256_CBC_SHA, |
++ TLS_SRP_SHA_WITH_AES_256_CBC_SHA, |
++ TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, |
++ TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, |
+ |
+ /* 128-bit */ |
+ #ifdef NSS_ENABLE_ECC |
+@@ -98,12 +101,16 @@ const PRUint16 SSL_ImplementedCiphers[] = { |
+ SSL_RSA_WITH_RC4_128_MD5, |
+ SSL_RSA_WITH_RC4_128_SHA, |
+ TLS_RSA_WITH_AES_128_CBC_SHA, |
++ TLS_SRP_SHA_WITH_AES_128_CBC_SHA, |
++ TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, |
++ TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, |
+ |
+ /* 112-bit 3DES */ |
+ #ifdef NSS_ENABLE_ECC |
+ TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA, |
+ TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, |
+ #endif /* NSS_ENABLE_ECC */ |
++ TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, |
+ SSL_DHE_RSA_WITH_3DES_EDE_CBC_SHA, |
+ SSL_DHE_DSS_WITH_3DES_EDE_CBC_SHA, |
+ #ifdef NSS_ENABLE_ECC |
+@@ -112,6 +119,8 @@ const PRUint16 SSL_ImplementedCiphers[] = { |
+ #endif /* NSS_ENABLE_ECC */ |
+ SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA, |
+ SSL_RSA_WITH_3DES_EDE_CBC_SHA, |
++ TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, |
++ TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, |
+ |
+ /* 56-bit DES "domestic" cipher suites */ |
+ SSL_DHE_RSA_WITH_DES_CBC_SHA, |
+diff --git a/net/third_party/nss/ssl/sslerr.h b/net/third_party/nss/ssl/sslerr.h |
+index eb56ea9..a0c4b9d 100644 |
+--- a/net/third_party/nss/ssl/sslerr.h |
++++ b/net/third_party/nss/ssl/sslerr.h |
+@@ -205,6 +205,8 @@ SSL_ERROR_WEAK_SERVER_KEY = (SSL_ERROR_BASE + 115), |
+ |
+ SSL_ERROR_RX_UNEXPECTED_CERT_STATUS = (SSL_ERROR_BASE + 116), |
+ |
++SSL_ERROR_UNKNOWN_PSK_IDENTITY_ALERT = (SSL_ERROR_BASE + 117),
++ |
+ SSL_ERROR_END_OF_LIST /* let the c compiler determine the value of this. */ |
+ } SSLErrorCodes; |
+ #endif /* NO_SECURITY_ERROR_ENUM */ |
+diff --git a/net/third_party/nss/ssl/sslimpl.h b/net/third_party/nss/ssl/sslimpl.h |
+index 1ea82da..9eed38b 100644 |
+--- a/net/third_party/nss/ssl/sslimpl.h |
++++ b/net/third_party/nss/ssl/sslimpl.h |
+@@ -317,9 +317,10 @@ typedef struct { |
+ } ssl3CipherSuiteCfg; |
+ |
+ #ifdef NSS_ENABLE_ECC |
+-#define ssl_V3_SUITES_IMPLEMENTED 50 |
++#define ssl_V3_SUITES_IMPLEMENTED 60 |
+ #else |
+-#define ssl_V3_SUITES_IMPLEMENTED 30 |
++#define ssl_V3_SUITES_IMPLEMENTED 60 |
++/* TODO(sqs): ssl_V3_SUITES_IMPLEMENTED sizes cipherSuites[] in ssl3con.c.
++ * Before the tls-srp patch these were 50 (ECC) and 30 (non-ECC); this patch
++ * adds 9 SRP suites, so 59 and 39 would be exact. 60 is a loose upper
++ * bound -- verify against the actual array lengths. */
+ #endif /* NSS_ENABLE_ECC */ |
+ |
+ typedef struct sslOptionsStr { |
+@@ -1050,6 +1051,8 @@ struct sslSecurityInfoStr { |
+ CERTCertificate *localCert; /* ssl 2 & 3 */ |
+ CERTCertificate *peerCert; /* ssl 2 & 3 */ |
+ SECKEYPublicKey *peerKey; /* ssl3 only */ |
++ SECItem *userName; /* SSL username credential */ |
++ SECItem *userPasswd; /* SSL userpasswd credential */ |
+ |
+ SSLSignType authAlgorithm; |
+ PRUint32 authKeyBits; |
+@@ -1159,6 +1162,10 @@ const unsigned char * preferredCipher; |
+ SSLHandshakeCallback handshakeCallback; |
+ void *handshakeCallbackData; |
+ void *pkcs11PinArg; |
++ SSLUserPasswdCB getUserPasswd; |
++ void *getUserPasswdArg; |
++ SSLGetSRPParamsCB getSRPParams; |
++ void *getSRPParamsArg; |
+ |
+ PRIntervalTime rTimeout; /* timeout for NSPR I/O */ |
+ PRIntervalTime wTimeout; /* timeout for NSPR I/O */ |
+diff --git a/net/third_party/nss/ssl/sslproto.h b/net/third_party/nss/ssl/sslproto.h |
+index b534d0b..cbf6250 100644 |
+--- a/net/third_party/nss/ssl/sslproto.h |
++++ b/net/third_party/nss/ssl/sslproto.h |
+@@ -220,6 +220,16 @@ |
+ #define TLS_ECDH_anon_WITH_AES_128_CBC_SHA 0xC018 |
+ #define TLS_ECDH_anon_WITH_AES_256_CBC_SHA 0xC019 |
+ |
++#define TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA 0xC01A |
++#define TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA 0xC01B |
++#define TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA 0xC01C |
++#define TLS_SRP_SHA_WITH_AES_128_CBC_SHA 0xC01D |
++#define TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA 0xC01E |
++#define TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA 0xC01F |
++#define TLS_SRP_SHA_WITH_AES_256_CBC_SHA 0xC020 |
++#define TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA 0xC021 |
++#define TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA 0xC022 |
++ |
+ /* Netscape "experimental" cipher suites. */ |
+ #define SSL_RSA_OLDFIPS_WITH_3DES_EDE_CBC_SHA 0xffe0 |
+ #define SSL_RSA_OLDFIPS_WITH_DES_CBC_SHA 0xffe1 |
+diff --git a/net/third_party/nss/ssl/sslsock.c b/net/third_party/nss/ssl/sslsock.c |
+index b14a935..18ee612 100644 |
+--- a/net/third_party/nss/ssl/sslsock.c |
++++ b/net/third_party/nss/ssl/sslsock.c |
+@@ -102,6 +102,15 @@ static cipherPolicy ssl_ciphers[] = { /* Export France */ |
+ { TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA, SSL_NOT_ALLOWED, SSL_NOT_ALLOWED }, |
+ { TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, SSL_NOT_ALLOWED, SSL_NOT_ALLOWED }, |
+ { TLS_RSA_WITH_SEED_CBC_SHA, SSL_NOT_ALLOWED, SSL_NOT_ALLOWED }, |
++ { TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, SSL_NOT_ALLOWED, SSL_NOT_ALLOWED }, |
++ { TLS_SRP_SHA_WITH_AES_128_CBC_SHA, SSL_NOT_ALLOWED, SSL_NOT_ALLOWED }, |
++ { TLS_SRP_SHA_WITH_AES_256_CBC_SHA, SSL_NOT_ALLOWED, SSL_NOT_ALLOWED }, |
++ { TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, SSL_NOT_ALLOWED, SSL_NOT_ALLOWED }, |
++ { TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, SSL_NOT_ALLOWED, SSL_NOT_ALLOWED }, |
++ { TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, SSL_NOT_ALLOWED, SSL_NOT_ALLOWED }, |
++ { TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, SSL_NOT_ALLOWED, SSL_NOT_ALLOWED }, |
++ { TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, SSL_NOT_ALLOWED, SSL_NOT_ALLOWED }, |
++ { TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, SSL_NOT_ALLOWED, SSL_NOT_ALLOWED }, |
+ { TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA, SSL_ALLOWED, SSL_NOT_ALLOWED }, |
+ { TLS_RSA_EXPORT1024_WITH_RC4_56_SHA, SSL_ALLOWED, SSL_NOT_ALLOWED }, |
+ #ifdef NSS_ENABLE_ECC |
+diff --git a/net/third_party/nss/ssl/sslt.h b/net/third_party/nss/ssl/sslt.h |
+index 3fa3f9b..172364c 100644 |
+--- a/net/third_party/nss/ssl/sslt.h |
++++ b/net/third_party/nss/ssl/sslt.h |
+@@ -74,6 +74,7 @@ typedef enum { |
+ ssl_kea_dh = 2, |
+ ssl_kea_fortezza = 3, /* deprecated, now unused */ |
+ ssl_kea_ecdh = 4, |
++ ssl_kea_srp = 5, |
+ ssl_kea_size /* number of ssl_kea_ algorithms */ |
+ } SSLKEAType; |
+ |
+@@ -88,6 +89,7 @@ typedef enum { |
+ #define kt_fortezza ssl_kea_fortezza /* deprecated, now unused */ |
+ #define kt_ecdh ssl_kea_ecdh |
+ #define kt_kea_size ssl_kea_size |
++#define kt_srp ssl_kea_srp |
+ |
+ typedef enum { |
+ ssl_sign_null = 0, |
+@@ -203,13 +205,14 @@ typedef enum { |
+ ssl_elliptic_curves_xtn = 10, |
+ ssl_ec_point_formats_xtn = 11, |
+ #endif |
++ ssl_srp_hello_xtn = 12, |
+ ssl_session_ticket_xtn = 35, |
+ ssl_next_proto_neg_xtn = 13172, |
+ ssl_snap_start_xtn = 13174, |
+ ssl_renegotiation_info_xtn = 0xff01 /* experimental number */ |
+ } SSLExtensionType; |
+ |
+-#define SSL_MAX_EXTENSIONS 8 |
++#define SSL_MAX_EXTENSIONS 9 |
+ |
+ typedef enum { |
+ /* No Snap Start handshake was attempted. */ |