#include "BigUnsigned.hh"

// Memory management definitions have moved to the bottom of NumberlikeArray.hh.

// The templates used by these constructors and converters are at the bottom of
// BigUnsigned.hh.

BigUnsigned::BigUnsigned(unsigned long  x) { initFromPrimitive      (x); }
BigUnsigned::BigUnsigned(unsigned int   x) { initFromPrimitive      (x); }
BigUnsigned::BigUnsigned(unsigned short x) { initFromPrimitive      (x); }
BigUnsigned::BigUnsigned(         long  x) { initFromSignedPrimitive(x); }
BigUnsigned::BigUnsigned(         int   x) { initFromSignedPrimitive(x); }
BigUnsigned::BigUnsigned(         short x) { initFromSignedPrimitive(x); }

unsigned long  BigUnsigned::toUnsignedLong () const { return convertToPrimitive      <unsigned long >(); }
unsigned int   BigUnsigned::toUnsignedInt  () const { return convertToPrimitive      <unsigned int  >(); }
unsigned short BigUnsigned::toUnsignedShort() const { return convertToPrimitive      <unsigned short>(); }
long           BigUnsigned::toLong         () const { return convertToSignedPrimitive<long          >(); }
int            BigUnsigned::toInt          () const { return convertToSignedPrimitive<int           >(); }
short          BigUnsigned::toShort        () const { return convertToSignedPrimitive<short         >(); }
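
/* A usage sketch (illustrative, not part of the library):
 *
 *	BigUnsigned n(25);  // construct from a signed primitive
 *	int i = n.toInt();  // i == 25
 *
 * Per the converter templates in BigUnsigned.hh, a value too big for the
 * requested type is expected to cause an exception rather than truncate. */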

// BIT/BLOCK ACCESSORS

void BigUnsigned::setBlock(Index i, Blk newBlock) {
	if (newBlock == 0) {
		if (i < len) {
			blk[i] = 0;
			zapLeadingZeros();
		}
		// If i >= len, no effect.
	} else {
		if (i >= len) {
			// The nonzero block extends the number.
			allocateAndCopy(i+1);
			// Zero any added blocks that we aren't setting.
			for (Index j = len; j < i; j++)
				blk[j] = 0;
			len = i+1;
		}
		blk[i] = newBlock;
	}
}
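
/* For example (a sketch, assuming 32-bit blocks): starting from zero,
 * setBlock(3, 1) grows the number to 4 blocks, zeroes blocks 0 through 2,
 * and sets block 3, giving the value 2^96. */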

/* Evidently the compiler wants BigUnsigned:: on the return type because, at
 * that point, it hasn't yet parsed the BigUnsigned:: on the name to get the
 * proper scope. */
BigUnsigned::Index BigUnsigned::bitLength() const {
	if (isZero())
		return 0;
	else {
		Blk leftmostBlock = getBlock(len - 1);
		Index leftmostBlockLen = 0;
		while (leftmostBlock != 0) {
			leftmostBlock >>= 1;
			leftmostBlockLen++;
		}
		return leftmostBlockLen + (len - 1) * N;
	}
}
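
// For example, bitLength() of 5 (binary 101) is 3, and of 256 (binary
// 100000000) is 9; zero gets the special answer 0.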

void BigUnsigned::setBit(Index bi, bool newBit) {
	Index blockI = bi / N;
	Blk block = getBlock(blockI), mask = Blk(1) << (bi % N);
	block = newBit ? (block | mask) : (block & ~mask);
	setBlock(blockI, block);
}
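
/* For example, starting from zero, setBit(0, true) yields 1, and a further
 * setBit(3, true) yields 9 (binary 1001). */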

// COMPARISON
BigUnsigned::CmpRes BigUnsigned::compareTo(const BigUnsigned &x) const {
	// A bigger length implies a bigger number.
	if (len < x.len)
		return less;
	else if (len > x.len)
		return greater;
	else {
		// Compare blocks one by one from left to right.
		Index i = len;
		while (i > 0) {
			i--;
			if (blk[i] == x.blk[i])
				continue;
			else if (blk[i] > x.blk[i])
				return greater;
			else
				return less;
		}
		// If no blocks differed, the numbers are equal.
		return equal;
	}
}
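
/* A typical call site (illustrative only; the cases use the CmpRes values
 * this function returns):
 *
 *	switch (a.compareTo(b)) {
 *	case less:    ...
 *	case equal:   ...
 *	case greater: ...
 *	}
 */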

// COPY-LESS OPERATIONS

/*
 * On most calls to copy-less operations, it's safe to read the inputs little by
 * little and write the outputs little by little.  However, if one of the
 * inputs is coming from the same variable into which the output is to be
 * stored (an "aliased" call), we risk overwriting the input before we read it.
 * In this case, we first compute the result into a temporary BigUnsigned
 * variable and then copy it into the requested output variable *this.
 * Each put-here operation uses the DTRT_ALIASED macro (Do The Right Thing on
 * aliased calls) to generate code for this check.
 *
 * I adopted this approach on 2007.02.13 (see Assignment Operators in
 * BigUnsigned.hh).  Before then, put-here operations rejected aliased calls
 * with an exception.  I think doing the right thing is better.
 *
 * Some of the put-here operations can probably handle aliased calls safely
 * without the extra copy because (for example) they process blocks strictly
 * right-to-left.  At some point I might determine which ones don't need the
 * copy, but my reasoning would need to be verified very carefully.  For now
 * I'll leave in the copy.
 */
#define DTRT_ALIASED(cond, op) \
	if (cond) { \
		BigUnsigned tmpThis; \
		tmpThis.op; \
		*this = tmpThis; \
		return; \
	}
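
/* For instance, `DTRT_ALIASED(this == &a || this == &b, add(a, b))' expands
 * to code that, on an aliased call, computes `tmpThis.add(a, b)' into a
 * temporary and assigns it to *this before returning. */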

void BigUnsigned::add(const BigUnsigned &a, const BigUnsigned &b) {
	DTRT_ALIASED(this == &a || this == &b, add(a, b));
	// If one argument is zero, copy the other.
	if (a.len == 0) {
		operator =(b);
		return;
	} else if (b.len == 0) {
		operator =(a);
		return;
	}
	// Some variables...
	// Carries in and out of an addition stage
	bool carryIn, carryOut;
	Blk temp;
	Index i;
	// a2 points to the longer input, b2 points to the shorter
	const BigUnsigned *a2, *b2;
	if (a.len >= b.len) {
		a2 = &a;
		b2 = &b;
	} else {
		a2 = &b;
		b2 = &a;
	}
	// Set preliminary length and make room in this BigUnsigned
	len = a2->len + 1;
	allocate(len);
	// For each block index that is present in both inputs...
	for (i = 0, carryIn = false; i < b2->len; i++) {
		// Add input blocks
		temp = a2->blk[i] + b2->blk[i];
		// If a rollover occurred, the result is less than either input.
		// This test is used many times in the BigUnsigned code.
		carryOut = (temp < a2->blk[i]);
		// If a carry was input, handle it
		if (carryIn) {
			temp++;
			carryOut |= (temp == 0);
		}
		blk[i] = temp; // Save the addition result
		carryIn = carryOut; // Pass the carry along
	}
	// If there is a carry left over, increase blocks until
	// one does not roll over.
	for (; i < a2->len && carryIn; i++) {
		temp = a2->blk[i] + 1;
		carryIn = (temp == 0);
		blk[i] = temp;
	}
	// If the carry was resolved but the larger number
	// still has blocks, copy them over.
	for (; i < a2->len; i++)
		blk[i] = a2->blk[i];
	// Set the extra block if there's still a carry, decrease length otherwise
	if (carryIn)
		blk[i] = 1;
	else
		len--;
}
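
/* A worked carry example (assuming 32-bit blocks): adding {0xFFFFFFFF} and
 * {0x1} gives temp == 0x0 in the first stage, so `temp < a2->blk[i]' detects
 * the rollover; the leftover carry sets the extra block, giving {0x0, 0x1},
 * i.e. 2^32. */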

void BigUnsigned::subtract(const BigUnsigned &a, const BigUnsigned &b) {
	DTRT_ALIASED(this == &a || this == &b, subtract(a, b));
	if (b.len == 0) {
		// If b is zero, copy a.
		operator =(a);
		return;
	} else if (a.len < b.len)
		// If a is shorter than b, the result is negative.
		throw "BigUnsigned::subtract: "
			"Negative result in unsigned calculation";
	// Some variables...
	bool borrowIn, borrowOut;
	Blk temp;
	Index i;
	// Set preliminary length and make room
	len = a.len;
	allocate(len);
	// For each block index that is present in both inputs...
	for (i = 0, borrowIn = false; i < b.len; i++) {
		temp = a.blk[i] - b.blk[i];
		// If a reverse rollover occurred,
		// the result is greater than the block from a.
		borrowOut = (temp > a.blk[i]);
		// Handle an incoming borrow
		if (borrowIn) {
			borrowOut |= (temp == 0);
			temp--;
		}
		blk[i] = temp; // Save the subtraction result
		borrowIn = borrowOut; // Pass the borrow along
	}
	// If there is a borrow left over, decrease blocks until
	// one does not reverse rollover.
	for (; i < a.len && borrowIn; i++) {
		borrowIn = (a.blk[i] == 0);
		blk[i] = a.blk[i] - 1;
	}
	/* If there's still a borrow, the result is negative.
	 * Throw an exception, but zero out this object so as to leave it in a
	 * predictable state. */
	if (borrowIn) {
		len = 0;
		throw "BigUnsigned::subtract: Negative result in unsigned calculation";
	} else
		// Copy over the rest of the blocks
		for (; i < a.len; i++)
			blk[i] = a.blk[i];
	// Zap leading zeros
	zapLeadingZeros();
}
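
/* A worked borrow example (assuming 32-bit blocks): subtracting {0x1} from
 * {0x0, 0x1} (the value 2^32) gives temp == 0xFFFFFFFF in the first stage,
 * so `temp > a.blk[i]' detects the reverse rollover; the borrow then clears
 * the high block, and zapLeadingZeros leaves {0xFFFFFFFF}. */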

/*
 * About the multiplication and division algorithms:
 *
 * I searched unsuccessfully for fast C++ built-in operations like the `b_0'
 * and `c_0' Knuth describes in Section 4.3.1 of ``The Art of Computer
 * Programming'' (replace `place' by `Blk'):
 *
 *    ``b_0[:] multiplication of a one-place integer by another one-place
 *      integer, giving a two-place answer;
 *
 *    ``c_0[:] division of a two-place integer by a one-place integer,
 *      provided that the quotient is a one-place integer, and yielding
 *      also a one-place remainder.''
 *
 * I also missed his note that ``[b]y adjusting the word size, if
 * necessary, nearly all computers will have these three operations
 * available'', so I gave up on trying to use algorithms similar to his.
 * A future version of the library might include such algorithms; I
 * would welcome contributions from others for this.
 *
 * I eventually decided to use bit-shifting algorithms.  To multiply `a'
 * and `b', we zero out the result.  Then, for each `1' bit in `a', we
 * shift `b' left the appropriate amount and add it to the result.
 * Similarly, to divide `a' by `b', we shift `b' left varying amounts,
 * repeatedly trying to subtract it from `a'.  When we succeed, we note
 * the fact by setting a bit in the quotient.  While these algorithms
 * have the same O(n^2) time complexity as Knuth's, the ``constant factor''
 * is likely to be larger.
 *
 * Because I used these algorithms, which require single-block addition
 * and subtraction rather than single-block multiplication and division,
 * the innermost loops of all four routines are very similar.  Study one
 * of them and all will become clear.
 */
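
/* For instance, with a == 5 (binary 101), the multiplication loop below
 * computes a * b as (b << 0) + (b << 2), one addition per set bit of a. */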

/*
 * This is a little inline function used by both the multiplication
 * routine and the division routine.
 *
 * `getShiftedBlock' returns the `x'th block of `num << y'.
 * `y' may be anything from 0 to N - 1, and `x' may be anything from
 * 0 to `num.len'.
 *
 * Two things contribute to this block:
 *
 * (1) The `N - y' low bits of `num.blk[x]', shifted `y' bits left.
 *
 * (2) The `y' high bits of `num.blk[x-1]', shifted `N - y' bits right.
 *
 * But we must be careful if `x == 0' or `x == num.len', in
 * which case we should use 0 instead of (2) or (1), respectively.
 *
 * If `y == 0', then (2) contributes 0, as it should.  However,
 * in some computer environments, for a reason I cannot understand,
 * `a >> b' means `a >> (b % N)'.  This means `num.blk[x-1] >> (N - y)'
 * will return `num.blk[x-1]' instead of the desired 0 when `y == 0';
 * the test `y == 0' handles this case specially.
 */
inline BigUnsigned::Blk getShiftedBlock(const BigUnsigned &num,
	BigUnsigned::Index x, unsigned int y) {
	BigUnsigned::Blk part1 = (x == 0 || y == 0) ? 0 : (num.blk[x - 1] >> (BigUnsigned::N - y));
	BigUnsigned::Blk part2 = (x == num.len) ? 0 : (num.blk[x] << y);
	return part1 | part2;
}
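
/* A worked example (assuming 32-bit blocks): for num == {0xFFFFFFFF} and
 * y == 4, getShiftedBlock(num, 0, 4) == 0xFFFFFFF0 (contribution (1) only)
 * and getShiftedBlock(num, 1, 4) == 0x0000000F (contribution (2) only),
 * which together are the two blocks of num << 4. */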

void BigUnsigned::multiply(const BigUnsigned &a, const BigUnsigned &b) {
	DTRT_ALIASED(this == &a || this == &b, multiply(a, b));
	// If either a or b is zero, set to zero.
	if (a.len == 0 || b.len == 0) {
		len = 0;
		return;
	}
	/*
	 * Overall method:
	 *
	 * Set this = 0.
	 * For each 1-bit of `a' (say the `i2'th bit of block `i'):
	 *	Add `b << (i blocks and i2 bits)' to *this.
	 */
	// Variables for the calculation
	Index i, j, k;
	unsigned int i2;
	Blk temp;
	bool carryIn, carryOut;
	// Set preliminary length and make room
	len = a.len + b.len;
	allocate(len);
	// Zero out this object
	for (i = 0; i < len; i++)
		blk[i] = 0;
	// For each block of the first number...
	for (i = 0; i < a.len; i++) {
		// For each 1-bit of that block...
		for (i2 = 0; i2 < N; i2++) {
			if ((a.blk[i] & (Blk(1) << i2)) == 0)
				continue;
			/*
			 * Add b to this, shifted left i blocks and i2 bits.
			 * j is the index in b, and k = i + j is the index in this.
			 *
			 * `getShiftedBlock', a short inline function defined above,
			 * is now used for the bit handling.  It replaces the more
			 * complex `bHigh' code, in which each run of the loop dealt
			 * immediately with the low bits and saved the high bits to
			 * be picked up next time.  The last run of the loop used to
			 * leave leftover high bits, which were handled separately.
			 * Instead, this loop runs an additional time with j == b.len.
			 * These changes were made on 2005.01.11.
			 */
			for (j = 0, k = i, carryIn = false; j <= b.len; j++, k++) {
				/*
				 * The body of this loop is very similar to the body of the first loop
				 * in `add', except that this loop does a `+=' instead of a `+'.
				 */
				temp = blk[k] + getShiftedBlock(b, j, i2);
				carryOut = (temp < blk[k]);
				if (carryIn) {
					temp++;
					carryOut |= (temp == 0);
				}
				blk[k] = temp;
				carryIn = carryOut;
			}
			// No more extra iteration to deal with `bHigh'.
			// Roll-over a carry as necessary.
			for (; carryIn; k++) {
				blk[k]++;
				carryIn = (blk[k] == 0);
			}
		}
	}
	// Zap possible leading zero
	if (blk[len - 1] == 0)
		len--;
}
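
/* Note the preliminary length a.len + b.len can overshoot by one block:
 * multiplying {0x2} by {0x2} (assuming 32-bit blocks) allocates two blocks
 * but produces {0x4, 0x0}, and the final check trims len back to 1. */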

/*
 * DIVISION WITH REMAINDER
 * This monstrous function mods *this by the given divisor b while storing the
 * quotient in the given object q; at the end, *this contains the remainder.
 * The seemingly bizarre pattern of inputs and outputs was chosen so that the
 * function copies as little as possible (since it is implemented by repeated
 * subtraction of multiples of b from *this).
 *
 * "modWithQuotient" might be a better name for this function, but I would
 * rather not change the name now.
 */
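/* A usage sketch (illustrative only):
 *
 *	BigUnsigned a(70), q;
 *	a.divideWithRemainder(BigUnsigned(11), q);
 *	// Now q == 6 and a == 4, since 70 == 6 * 11 + 4.
 */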
void BigUnsigned::divideWithRemainder(const BigUnsigned &b, BigUnsigned &q) {
	/* Defending against aliased calls is more complex than usual because we
	 * are writing to both *this and q.
	 *
	 * It would be silly to try to write quotient and remainder to the
	 * same variable.  Rule that out right away. */
	if (this == &q)
		throw "BigUnsigned::divideWithRemainder: Cannot write quotient and remainder into the same variable";
	/* Now *this and q are separate, so the only concern is that b might be
	 * aliased to one of them.  If so, use a temporary copy of b. */
	if (this == &b || &q == &b) {
		BigUnsigned tmpB(b);
		divideWithRemainder(tmpB, q);
		return;
	}

	/*
	 * Knuth's definition of mod (which this function uses) is somewhat
	 * different from the C++ definition of % in case of division by 0.
	 *
	 * We let a / 0 == 0 (it doesn't matter much) and a % 0 == a, no
	 * exceptions thrown.  This allows us to preserve both Knuth's demand
	 * that a mod 0 == a and the useful property that
	 * (a / b) * b + (a % b) == a.
	 */
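	// For example, under this convention 5 / 0 == 0 and 5 % 0 == 5,
	// and indeed 0 * 0 + 5 == 5.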
	if (b.len == 0) {
		q.len = 0;
		return;
	}

	/*
	 * If *this.len < b.len, then *this < b, and we can be sure that b doesn't go into
	 * *this at all.  The quotient is 0 and *this is already the remainder (so leave it alone).
	 */
	if (len < b.len) {
		q.len = 0;
		return;
	}

	// At this point we know (*this).len >= b.len > 0.  (Whew!)

	/*
	 * Overall method:
	 *
	 * For each appropriate i and i2, decreasing:
	 *	Subtract (b << (i blocks and i2 bits)) from *this, storing the
	 *	  result in subtractBuf.
	 *	If the subtraction succeeds with a nonnegative result:
	 *		Turn on bit i2 of block i of the quotient q.
	 *		Copy subtractBuf back into *this.
	 *	Otherwise bit i2 of block i remains off, and *this is unchanged.
	 *
	 * Eventually q will contain the entire quotient, and *this will
	 * be left with the remainder.
	 *
	 * subtractBuf[x] corresponds to blk[x], not blk[x+i], since 2005.01.11.
	 * But on a single iteration, we don't touch the i lowest blocks of blk
	 * (and don't use those of subtractBuf) because these blocks are
	 * unaffected by the subtraction: we are subtracting
	 * (b << (i blocks and i2 bits)), which ends in at least `i' zero
	 * blocks. */
	// Variables for the calculation
	Index i, j, k;
	unsigned int i2;
	Blk temp;
	bool borrowIn, borrowOut;

	/*
	 * Make sure we have an extra zero block just past the value.
	 *
	 * When we attempt a subtraction, we might shift `b' so
	 * its first block begins a few bits left of the dividend,
	 * and then we'll try to compare these extra bits with
	 * a nonexistent block to the left of the dividend.  The
	 * extra zero block ensures sensible behavior; we need
	 * an extra block in `subtractBuf' for exactly the same reason.
	 */
	Index origLen = len; // Save real length.
	/* To avoid an out-of-bounds access in case of reallocation, allocate
	 * first and then increment the logical length. */
	allocateAndCopy(len + 1);
	len++;
	blk[origLen] = 0; // Zero the added block.

	// subtractBuf holds part of the result of a subtraction; see above.
	Blk *subtractBuf = new Blk[len];

	// Set preliminary length for quotient and make room
	q.len = origLen - b.len + 1;
	q.allocate(q.len);
	// Zero out the quotient
	for (i = 0; i < q.len; i++)
		q.blk[i] = 0;

	// For each possible left-shift of b in blocks...
	i = q.len;
	while (i > 0) {
		i--;
		// For each possible left-shift of b in bits...
		// (Remember, N is the number of bits in a Blk.)
		q.blk[i] = 0;
		i2 = N;
		while (i2 > 0) {
			i2--;
			/*
			 * Subtract b, shifted left i blocks and i2 bits, from *this,
			 * and store the answer in subtractBuf.  In the for loop, `k == i + j'.
			 *
			 * Compare this to the middle section of `multiply'.  They
			 * are in many ways analogous.  See especially the discussion
			 * of `getShiftedBlock'.
			 */
			for (j = 0, k = i, borrowIn = false; j <= b.len; j++, k++) {
				temp = blk[k] - getShiftedBlock(b, j, i2);
				borrowOut = (temp > blk[k]);
				if (borrowIn) {
					borrowOut |= (temp == 0);
					temp--;
				}
				// Since 2005.01.11, indices of `subtractBuf' directly match those of `blk', so use `k'.
				subtractBuf[k] = temp;
				borrowIn = borrowOut;
			}
			// No more extra iteration to deal with `bHigh'.
			// Roll-over a borrow as necessary.
			for (; k < origLen && borrowIn; k++) {
				borrowIn = (blk[k] == 0);
				subtractBuf[k] = blk[k] - 1;
			}
			/*
			 * If the subtraction was performed successfully (!borrowIn),
			 * set bit i2 in block i of the quotient.
			 *
			 * Then, copy the portion of subtractBuf filled by the subtraction
			 * back to *this.  This portion starts with block i and ends--
			 * where?  Not necessarily at block `i + b.len'!  Well, we
			 * increased k every time we saved a block into subtractBuf, so
			 * the region of subtractBuf we copy is just [i, k).
			 */
			if (!borrowIn) {
				q.blk[i] |= (Blk(1) << i2);
				while (k > i) {
					k--;
					blk[k] = subtractBuf[k];
				}
			}
		}
	}
	// Zap possible leading zero in quotient
	if (q.blk[q.len - 1] == 0)
		q.len--;
	// Zap any/all leading zeros in remainder
	zapLeadingZeros();
	// Deallocate subtractBuf.
	// (Thanks to Brad Spencer for noticing my accidental omission of this!)
	delete [] subtractBuf;
}

/* BITWISE OPERATORS
 * These are straightforward blockwise operations except that they differ in
 * the output length and the necessity of zapLeadingZeros. */

void BigUnsigned::bitAnd(const BigUnsigned &a, const BigUnsigned &b) {
	DTRT_ALIASED(this == &a || this == &b, bitAnd(a, b));
	// The bitwise & can't be longer than either operand.
	len = (a.len >= b.len) ? b.len : a.len;
	allocate(len);
	Index i;
	for (i = 0; i < len; i++)
		blk[i] = a.blk[i] & b.blk[i];
	zapLeadingZeros();
}
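
/* For example (assuming 32-bit blocks), AND of {0x0, 0x1} (the value 2^32)
 * and {0xFFFFFFFF} has preliminary length 1, and its only block comes out 0,
 * so zapLeadingZeros reduces the length to 0, i.e. the value zero. */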

void BigUnsigned::bitOr(const BigUnsigned &a, const BigUnsigned &b) {
	DTRT_ALIASED(this == &a || this == &b, bitOr(a, b));
	Index i;
	const BigUnsigned *a2, *b2;
	if (a.len >= b.len) {
		a2 = &a;
		b2 = &b;
	} else {
		a2 = &b;
		b2 = &a;
	}
	allocate(a2->len);
	for (i = 0; i < b2->len; i++)
		blk[i] = a2->blk[i] | b2->blk[i];
	for (; i < a2->len; i++)
		blk[i] = a2->blk[i];
	len = a2->len;
	// Doesn't need zapLeadingZeros.
}

void BigUnsigned::bitXor(const BigUnsigned &a, const BigUnsigned &b) {
	DTRT_ALIASED(this == &a || this == &b, bitXor(a, b));
	Index i;
	const BigUnsigned *a2, *b2;
	if (a.len >= b.len) {
		a2 = &a;
		b2 = &b;
	} else {
		a2 = &b;
		b2 = &a;
	}
	allocate(a2->len);
	for (i = 0; i < b2->len; i++)
		blk[i] = a2->blk[i] ^ b2->blk[i];
	for (; i < a2->len; i++)
		blk[i] = a2->blk[i];
	len = a2->len;
	zapLeadingZeros();
}
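
/* bitXor needs zapLeadingZeros while bitOr does not: XOR of two equal-length
 * operands can cancel their top blocks (e.g. {0x1, 0x1} ^ {0x0, 0x1} ==
 * {0x1}), whereas OR always preserves the nonzero top block of the longer
 * operand. */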

void BigUnsigned::bitShiftLeft(const BigUnsigned &a, int b) {
	DTRT_ALIASED(this == &a, bitShiftLeft(a, b));
	if (b < 0) {
		if (b << 1 == 0)
			throw "BigUnsigned::bitShiftLeft: "
				"Pathological shift amount not implemented";
		else {
			bitShiftRight(a, -b);
			return;
		}
	}
	Index shiftBlocks = b / N;
	unsigned int shiftBits = b % N;
	// + 1: room for high bits nudged left into another block
	len = a.len + shiftBlocks + 1;
	allocate(len);
	Index i, j;
	for (i = 0; i < shiftBlocks; i++)
		blk[i] = 0;
	for (j = 0, i = shiftBlocks; j <= a.len; j++, i++)
		blk[i] = getShiftedBlock(a, j, shiftBits);
	// Zap possible leading zero
	if (blk[len - 1] == 0)
		len--;
}
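
/* For example (assuming 32-bit blocks), a shift by b == 40 splits into
 * shiftBlocks == 1 whole zero block plus shiftBits == 8 bits handled by
 * getShiftedBlock. */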

void BigUnsigned::bitShiftRight(const BigUnsigned &a, int b) {
	DTRT_ALIASED(this == &a, bitShiftRight(a, b));
	if (b < 0) {
		if (b << 1 == 0)
			throw "BigUnsigned::bitShiftRight: "
				"Pathological shift amount not implemented";
		else {
			bitShiftLeft(a, -b);
			return;
		}
	}
	// This calculation is wacky, but expressing the shift as a left bit shift
	// within each block lets us use getShiftedBlock.
	Index rightShiftBlocks = (b + N - 1) / N;
	unsigned int leftShiftBits = N * rightShiftBlocks - b;
	// Now (N * rightShiftBlocks - leftShiftBits) == b
	// and 0 <= leftShiftBits < N.
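	// For example (assuming 32-bit blocks), b == 40 gives rightShiftBlocks == 2
	// and leftShiftBits == 24: shifting left 24 bits and then dropping the two
	// lowest blocks is the same as shifting right 40 bits.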
	if (rightShiftBlocks >= a.len + 1) {
		// All of a is guaranteed to be shifted off, even considering the left
		// bit shift.
		len = 0;
		return;
	}
	// Now we're allocating a positive amount.
	// + 1: room for high bits nudged left into another block
	len = a.len + 1 - rightShiftBlocks;
	allocate(len);
	Index i, j;
	for (j = rightShiftBlocks, i = 0; j <= a.len; j++, i++)
		blk[i] = getShiftedBlock(a, j, leftShiftBits);
	// Zap possible leading zero
	if (blk[len - 1] == 0)
		len--;
}

// INCREMENT/DECREMENT OPERATORS

// Prefix increment
void BigUnsigned::operator ++() {
	Index i;
	bool carry = true;
	for (i = 0; i < len && carry; i++) {
		blk[i]++;
		carry = (blk[i] == 0);
	}
	if (carry) {
		// Allocate and then increase length, as in divideWithRemainder
		allocateAndCopy(len + 1);
		len++;
		blk[i] = 1;
	}
}

// Postfix increment: same as prefix
void BigUnsigned::operator ++(int) {
	operator ++();
}

// Prefix decrement
void BigUnsigned::operator --() {
	if (len == 0)
		throw "BigUnsigned::operator --(): Cannot decrement an unsigned zero";
	Index i;
	bool borrow = true;
	for (i = 0; borrow; i++) {
		borrow = (blk[i] == 0);
		blk[i]--;
	}
	// Zap possible leading zero (there can only be one)
	if (blk[len - 1] == 0)
		len--;
}

// Postfix decrement: same as prefix
void BigUnsigned::operator --(int) {
	operator --();
}