Index: mozilla/security/nss/lib/freebl/ecl/ecp_256_32.c |
=================================================================== |
--- mozilla/security/nss/lib/freebl/ecl/ecp_256_32.c (revision 179928) |
+++ mozilla/security/nss/lib/freebl/ecl/ecp_256_32.c (working copy) |
@@ -160,12 +160,8 @@ |
/* NON_ZERO_TO_ALL_ONES returns: |
* 0xffffffff for 0 < x <= 2**31 |
* 0 for x == 0 or x > 2**31. |
- * |
- * This macro assumes that right-shifting a signed number shifts in the MSB on |
- * the left. This is not ensured by the C standard, but is true on the CPUs |
- * that we're targetting with this code (x86 and ARM). |
*/ |
-#define NON_ZERO_TO_ALL_ONES(x) (~((u32) (((s32) ((x)-1)) >> 31))) |
+#define NON_ZERO_TO_ALL_ONES(x) ((((x) - 1) >> 31) - 1) |
Ryan Sleevi
2013/01/31 22:53:38
Suggestion: Add u32 casts to make sure that the un[… comment truncated in extraction]
wtc
2013/02/01 05:28:04
Thank you for the suggestion. The argument |x| is [… comment truncated in extraction]
|
/* felem_reduce_carry adds a multiple of p in order to cancel |carry|, |
* which is a term at 2**257. |
@@ -1246,22 +1242,47 @@ |
/* Interface with Freebl: */ |
+/* BYTESWAP_MP_DIGIT_TO_LE swaps the bytes of an mp_digit into |
+ * little-endian order. |
+ */ |
#ifdef IS_BIG_ENDIAN |
-#error "This code needs a little-endian processor" |
+#ifdef __APPLE__ |
+#include <libkern/OSByteOrder.h> |
+#define BYTESWAP32(x) OSSwapInt32(x) |
+#define BYTESWAP64(x) OSSwapInt64(x) |
+#else |
+#define BYTESWAP32(x) \ |
+  (((x) >> 24) | (((x) >> 8) & 0xff00) | (((x) & 0xff00) << 8) | ((x) << 24)) |
+#define BYTESWAP64(x) \ |
+  (((x) >> 56) | (((x) >> 40) & 0xff00) | \ |
+   (((x) >> 24) & 0xff0000) | (((x) >> 8) & 0xff000000) | \ |
+   (((x) & 0xff000000) << 8) | (((x) & 0xff0000) << 24) | \ |
+   (((x) & 0xff00) << 40) | ((x) << 56)) |
#endif |
-static const u32 kRInvDigits[8] = { |
+#ifdef MP_USE_UINT_DIGIT |
wtc
2013/01/31 22:46:05
If this macro is defined, mp_digit is 32-bit. Otherwise, mp_digit is 64-bit.
|
+#define BYTESWAP_MP_DIGIT_TO_LE(x) BYTESWAP32(x) |
+#else |
+#define BYTESWAP_MP_DIGIT_TO_LE(x) BYTESWAP64(x) |
+#endif |
+#endif /* IS_BIG_ENDIAN */ |
+ |
+#ifdef MP_USE_UINT_DIGIT |
+static const mp_digit kRInvDigits[8] = { |
0x80000000, 1, 0xffffffff, 0, |
0x80000001, 0xfffffffe, 1, 0x7fffffff |
}; |
+#else |
+static const mp_digit kRInvDigits[4] = { |
+ PR_UINT64(0x180000000), 0xffffffff, |
+ PR_UINT64(0xfffffffe80000001), PR_UINT64(0x7fffffff00000001) |
+}; |
+#endif |
#define MP_DIGITS_IN_256_BITS (32/sizeof(mp_digit)) |
static const mp_int kRInv = { |
MP_ZPOS, |
MP_DIGITS_IN_256_BITS, |
MP_DIGITS_IN_256_BITS, |
- /* Because we are running on a little-endian processor, this cast works for |
- * both 32 and 64-bit processors. |
- */ |
(mp_digit*) kRInvDigits |
}; |
@@ -1342,7 +1363,18 @@ |
* too short and we'll copy less data. |
*/ |
memset(out_scalar, 0, 32); |
+#ifdef IS_LITTLE_ENDIAN |
memcpy(out_scalar, MP_DIGITS(n), MP_USED(n) * sizeof(mp_digit)); |
wtc
2013/01/31 22:46:05
I found that this memcpy is done without checking [… comment truncated in extraction]
agl
2013/02/01 18:45:17
Yes. I had forgotten about that.
Is there a #defi[… comment truncated in extraction]
wtc
2013/02/01 19:28:35
NSS uses both DEBUG and NDEBUG. DEBUG is more comm[… comment truncated in extraction]
|
+#else |
+  { |
+    mp_size i; |
+    mp_digit swapped[MP_DIGITS_IN_256_BITS]; |
+    for (i = 0; i < MP_USED(n) && i < MP_DIGITS_IN_256_BITS; i++) { |
+      swapped[i] = BYTESWAP_MP_DIGIT_TO_LE(MP_DIGIT(n, i)); |
+    } |
+    memcpy(out_scalar, swapped, i * sizeof(mp_digit)); |
+  } |
+#endif |
} |
/* ec_GFp_nistp256_base_point_mul sets {out_x,out_y} = nG, where n is < the |