OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 518 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
529 __ Push(descriptor->register_params_[i]); | 529 __ Push(descriptor->register_params_[i]); |
530 } | 530 } |
531 ExternalReference miss = descriptor->miss_handler(); | 531 ExternalReference miss = descriptor->miss_handler(); |
532 __ CallExternalReference(miss, descriptor->register_param_count_); | 532 __ CallExternalReference(miss, descriptor->register_param_count_); |
533 } | 533 } |
534 | 534 |
535 __ Ret(); | 535 __ Ret(); |
536 } | 536 } |
537 | 537 |
538 | 538 |
| 539 void DoubleToIStub::Generate(MacroAssembler* masm) { |
| 540 Label done; |
| 541 Register input = source(); |
| 542 Register result = destination(); |
| 543 ASSERT(is_truncating()); |
| 544 |
| 545 ASSERT(result.Is64Bits()); |
| 546 ASSERT(jssp.Is(masm->StackPointer())); |
| 547 |
| 548 int double_offset = offset(); |
| 549 |
| 550 DoubleRegister double_scratch = d0; // only used if !skip_fastpath() |
| 551 Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result); |
| 552 Register scratch2 = |
| 553 GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1); |
| 554 |
| 555 __ Push(scratch1, scratch2); |
| 556 // Account for saved regs if input is jssp (the pushes above moved it). |
| 557 if (input.is(jssp)) double_offset += 2 * kPointerSize; |
| 558 |
| 559 if (!skip_fastpath()) { |
| 560 __ Push(double_scratch); |
| 561 if (input.is(jssp)) double_offset += 1 * kDoubleSize; |
| 562 __ Ldr(double_scratch, MemOperand(input, double_offset)); |
| 563 // Try to convert with a FPU convert instruction. This handles all |
| 564 // non-saturating cases. |
| 565 __ TryInlineTruncateDoubleToI(result, double_scratch, &done); |
| 566 __ Fmov(result, double_scratch); |
| 567 } else { |
| 568 __ Ldr(result, MemOperand(input, double_offset)); |
| 569 } |
| 570 |
| 571 // If we reach here we need to manually convert the input to an int32. |
| 572 |
| 573 // Extract the exponent. |
| 574 Register exponent = scratch1; |
| 575 __ Ubfx(exponent, result, HeapNumber::kMantissaBits, |
| 576 HeapNumber::kExponentBits); |
| 577 |
| 578 // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since |
| 579 // the mantissa gets shifted completely out of the int32_t result. |
| 580 __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32); |
| 581 __ CzeroX(result, ge); |
| 582 __ B(ge, &done); |
| 583 |
| 584 // The Fcvtzs sequence handles all cases except where the conversion causes |
| 585 // signed overflow in the int64_t target. Since we've already handled |
| 586 // exponents >= 84, we can guarantee that 63 <= exponent < 84. |
| 587 |
| 588 if (masm->emit_debug_code()) { |
| 589 __ Cmp(exponent, HeapNumber::kExponentBias + 63); |
| 590 // Exponents less than this should have been handled by the Fcvt case. |
| 591 __ Check(ge, kUnexpectedValue); |
| 592 } |
| 593 |
| 594 // Isolate the mantissa bits, and set the implicit '1'. |
| 595 Register mantissa = scratch2; |
| 596 __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits); |
| 597 __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits); |
| 598 |
| 599 // Negate the mantissa if the input was negative (sign bit of the raw bits). |
| 600 __ Tst(result, kXSignMask); |
| 601 __ Cneg(mantissa, mantissa, ne); |
| 602 |
| 603 // Shift the mantissa bits into the correct place. We know that we have to |
| 604 // shift it left here, because exponent >= 63 >= kMantissaBits. |
| 605 __ Sub(exponent, exponent, |
| 606 HeapNumber::kExponentBias + HeapNumber::kMantissaBits); |
| 607 __ Lsl(result, mantissa, exponent); |
| 608 |
| 609 __ Bind(&done); |
| 610 if (!skip_fastpath()) { |
| 611 __ Pop(double_scratch); |
| 612 } |
| 613 __ Pop(scratch2, scratch1); |
| 614 __ Ret(); |
| 615 } |
| 616 |
| 617 |
539 // See call site for description. | 618 // See call site for description. |
540 static void EmitIdenticalObjectComparison(MacroAssembler* masm, | 619 static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
541 Register left, | 620 Register left, |
542 Register right, | 621 Register right, |
543 Register scratch, | 622 Register scratch, |
544 FPRegister double_scratch, | 623 FPRegister double_scratch, |
545 Label* slow, | 624 Label* slow, |
546 Condition cond) { | 625 Condition cond) { |
547 ASSERT(!AreAliased(left, right, scratch)); | 626 ASSERT(!AreAliased(left, right, scratch)); |
548 Label not_identical, return_equal, heap_number; | 627 Label not_identical, return_equal, heap_number; |
(...skipping 5163 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5712 MemOperand(fp, 6 * kPointerSize), | 5791 MemOperand(fp, 6 * kPointerSize), |
5713 NULL); | 5792 NULL); |
5714 } | 5793 } |
5715 | 5794 |
5716 | 5795 |
5717 #undef __ | 5796 #undef __ |
5718 | 5797 |
5719 } } // namespace v8::internal | 5798 } } // namespace v8::internal |
5720 | 5799 |
5721 #endif // V8_TARGET_ARCH_A64 | 5800 #endif // V8_TARGET_ARCH_A64 |
OLD | NEW |