Index: runtime/lib/math.cc
===================================================================
--- runtime/lib/math.cc (revision 40060)
+++ runtime/lib/math.cc (working copy)
@@ -6,7 +6,6 @@
 #include "vm/bootstrap_natives.h"
-#include "vm/bigint_operations.h"
 #include "vm/exceptions.h"
 #include "vm/native_entry.h"
 #include "vm/object.h"
@@ -153,26 +152,34 @@
   GET_NON_NULL_NATIVE_ARGUMENT(Integer, seed_int, arguments->NativeArgAt(0));
   uint64_t seed = 0;
   if (seed_int.IsBigint()) {
-    const Bigint& mask64 = Bigint::Handle(
-        BigintOperations::NewFromUint64(0xffffffffffffffffLL));
     Bigint& big_seed = Bigint::Handle();
     big_seed ^= seed_int.raw();
     uint64_t negate_mask = 0;
+    uint64_t borrow = 0;
     if (big_seed.IsNegative()) {
       // Negate bits to make seed positive.
       // Negate bits again (by xor with negate_mask) when extracted below,
       // to get original bits.
       negate_mask = 0xffffffffffffffffLL;
-      big_seed ^= BigintOperations::BitNot(big_seed);
+
+      // Instead of computing ~big_seed here, we compute it on the fly below as
+      // follows: ~(-big_seed) == ~(~(big_seed-1)) == big_seed-1
+      borrow = 1;
     }
-    Bigint& low64 = Bigint::Handle();
+    const intptr_t used = big_seed.Used();
+    intptr_t digit = 0;
     do {
-      low64 = BigintOperations::BitAnd(big_seed, mask64);
-      ASSERT(BigintOperations::FitsIntoUint64(low64));
-      uint64_t chunk = BigintOperations::ToUint64(low64) ^ negate_mask;
-      seed = (seed * 1037) ^ mix64(chunk);
-      big_seed = BigintOperations::ShiftRight(big_seed, 64);
-    } while (!big_seed.IsZero());
+      uint64_t low64 = ((digit + 1) < used) ? big_seed.DigitAt(digit + 1) : 0;
+      low64 <<= 32;
+      low64 |= (digit < used) ? big_seed.DigitAt(digit) : 0;
+      low64 -= borrow;
+      if ((borrow == 1) && (low64 != 0xffffffffffffffffLL)) {
+        borrow = 0;
+      }
+      low64 ^= negate_mask;
+      seed = (seed * 1037) ^ mix64(low64);
+      digit += 2;
+    } while (digit < used);
   } else {
     seed = mix64(static_cast<uint64_t>(seed_int.AsInt64Value()));
   }
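
Note (illustration, not part of the patch): for a negative seed, the loop above
reads the magnitude's 32-bit digits in pairs, assembles 64-bit chunks, and
subtracts a propagating borrow, so the chunks it hashes are those of
magnitude - 1 == ~(-magnitude); the complement is computed on the fly rather
than materialized as a Bigint. A minimal standalone sketch of that extraction,
assuming the magnitude is stored as a little-endian array of 32-bit digits
(the std::vector below stands in for the VM's Bigint digit storage; it is not
the VM API):

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  int main() {
    // Magnitude 2^64, i.e. the bigint value is -2^64.
    std::vector<uint32_t> digits = {0x0, 0x0, 0x1};
    const intptr_t used = static_cast<intptr_t>(digits.size());
    uint64_t borrow = 1;  // Compute magnitude - 1 == ~(-magnitude) on the fly.
    for (intptr_t digit = 0; digit < used; digit += 2) {
      uint64_t chunk = ((digit + 1) < used) ? digits[digit + 1] : 0;
      chunk <<= 32;
      chunk |= digits[digit];
      chunk -= borrow;
      // The borrow propagates only if the subtraction wrapped around, i.e.
      // the 64-bit chunk was zero before subtracting.
      if ((borrow == 1) && (chunk != 0xffffffffffffffffLL)) {
        borrow = 0;
      }
      printf("chunk %ld: %016llx\n", static_cast<long>(digit / 2),
             static_cast<unsigned long long>(chunk));
    }
    return 0;
  }

For -2^64 this prints ffffffffffffffff then 0000000000000000, the 64-bit chunks
of ~(-2^64) == 2^64 - 1; the patch additionally xors each chunk with
negate_mask to recover the original seed bits before feeding it to mix64().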