OLD | NEW |
1 // Copyright 2009 the V8 project authors. All rights reserved. | 1 // Copyright 2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 581 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
592 ASSERT(!dst.is(kScratchRegister)); | 592 ASSERT(!dst.is(kScratchRegister)); |
593 if (src->value() == 0) { | 593 if (src->value() == 0) { |
594 testq(dst, dst); | 594 testq(dst, dst); |
595 } else { | 595 } else { |
596 Move(kScratchRegister, src); | 596 Move(kScratchRegister, src); |
597 cmpq(dst, kScratchRegister); | 597 cmpq(dst, kScratchRegister); |
598 } | 598 } |
599 } | 599 } |
600 | 600 |
601 | 601 |
602 void MacroAssembler::SmiCompare(Register dst, const Operand& src) { | |
603 cmpq(dst, src); | |
604 } | |
605 | |
606 | |
607 void MacroAssembler::SmiCompare(const Operand& dst, Register src) { | 602 void MacroAssembler::SmiCompare(const Operand& dst, Register src) { |
608 cmpq(dst, src); | 603 cmpq(dst, src); |
609 } | 604 } |
610 | 605 |
611 | 606 |
612 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) { | 607 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) { |
613 if (src->value() == 0) { | 608 if (src->value() == 0) { |
614 // Only a tagged long smi can have a 32-bit representation. | 609 // Only a tagged long smi can have a 32-bit representation. |
615 cmpq(dst, Immediate(0)); | 610 cmpq(dst, Immediate(0)); |
616 } else { | 611 } else { |
(...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
732 j(not_equal, on_smi_result); | 727 j(not_equal, on_smi_result); |
733 } | 728 } |
734 } | 729 } |
735 | 730 |
736 | 731 |
737 void MacroAssembler::SmiAdd(Register dst, | 732 void MacroAssembler::SmiAdd(Register dst, |
738 Register src1, | 733 Register src1, |
739 Register src2, | 734 Register src2, |
740 Label* on_not_smi_result) { | 735 Label* on_not_smi_result) { |
741 ASSERT(!dst.is(src2)); | 736 ASSERT(!dst.is(src2)); |
742 if (on_not_smi_result == NULL) { | 737 if (dst.is(src1)) { |
743 // No overflow checking. Use only when it's known that | |
744 // overflowing is impossible. | |
745 if (dst.is(src1)) { | |
746 addq(dst, src2); | |
747 } else { | |
748 movq(dst, src1); | |
749 addq(dst, src2); | |
750 } | |
751 Assert(no_overflow, "Smi addition overflow"); | |
752 } else if (dst.is(src1)) { | |
753 addq(dst, src2); | 738 addq(dst, src2); |
754 Label smi_result; | 739 Label smi_result; |
755 j(no_overflow, &smi_result); | 740 j(no_overflow, &smi_result); |
756 // Restore src1. | 741 // Restore src1. |
757 subq(src1, src2); | 742 subq(src1, src2); |
758 jmp(on_not_smi_result); | 743 jmp(on_not_smi_result); |
759 bind(&smi_result); | 744 bind(&smi_result); |
760 } else { | 745 } else { |
761 movq(dst, src1); | 746 movq(dst, src1); |
762 addq(dst, src2); | 747 addq(dst, src2); |
(...skipping 26 matching lines...) Expand all Loading... |
789 jmp(on_not_smi_result); | 774 jmp(on_not_smi_result); |
790 bind(&smi_result); | 775 bind(&smi_result); |
791 } else { | 776 } else { |
792 movq(dst, src1); | 777 movq(dst, src1); |
793 subq(dst, src2); | 778 subq(dst, src2); |
794 j(overflow, on_not_smi_result); | 779 j(overflow, on_not_smi_result); |
795 } | 780 } |
796 } | 781 } |
797 | 782 |
798 | 783 |
799 void MacroAssembler::SmiSub(Register dst, | |
800 Register src1, | |
801 Operand const& src2, | |
802 Label* on_not_smi_result) { | |
803 ASSERT(!dst.is(src2)); | |
804 if (on_not_smi_result == NULL) { | |
805 // No overflow checking. Use only when it's known that | |
806 // overflowing is impossible (e.g., subtracting two positive smis). | |
807 if (dst.is(src1)) { | |
808 subq(dst, src2); | |
809 } else { | |
810 movq(dst, src1); | |
811 subq(dst, src2); | |
812 } | |
813 Assert(no_overflow, "Smi subtraction overflow"); | |
814 } else if (dst.is(src1)) { | |
815 subq(dst, src2); | |
816 Label smi_result; | |
817 j(no_overflow, &smi_result); | |
818 // Restore src1. | |
819 addq(src1, src2); | |
820 jmp(on_not_smi_result); | |
821 bind(&smi_result); | |
822 } else { | |
823 movq(dst, src1); | |
824 subq(dst, src2); | |
825 j(overflow, on_not_smi_result); | |
826 } | |
827 } | |
828 | |
829 void MacroAssembler::SmiMul(Register dst, | 784 void MacroAssembler::SmiMul(Register dst, |
830 Register src1, | 785 Register src1, |
831 Register src2, | 786 Register src2, |
832 Label* on_not_smi_result) { | 787 Label* on_not_smi_result) { |
833 ASSERT(!dst.is(src2)); | 788 ASSERT(!dst.is(src2)); |
834 ASSERT(!dst.is(kScratchRegister)); | 789 ASSERT(!dst.is(kScratchRegister)); |
835 ASSERT(!src1.is(kScratchRegister)); | 790 ASSERT(!src1.is(kScratchRegister)); |
836 ASSERT(!src2.is(kScratchRegister)); | 791 ASSERT(!src2.is(kScratchRegister)); |
837 | 792 |
838 if (dst.is(src1)) { | 793 if (dst.is(src1)) { |
(...skipping 1703 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2542 | 2497 |
2543 | 2498 |
2544 void MacroAssembler::AllocateTwoByteString(Register result, | 2499 void MacroAssembler::AllocateTwoByteString(Register result, |
2545 Register length, | 2500 Register length, |
2546 Register scratch1, | 2501 Register scratch1, |
2547 Register scratch2, | 2502 Register scratch2, |
2548 Register scratch3, | 2503 Register scratch3, |
2549 Label* gc_required) { | 2504 Label* gc_required) { |
2550 // Calculate the number of bytes needed for the characters in the string while | 2505 // Calculate the number of bytes needed for the characters in the string while |
2551 // observing object alignment. | 2506 // observing object alignment. |
2552 const int kHeaderAlignment = SeqTwoByteString::kHeaderSize & | 2507 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
2553 kObjectAlignmentMask; | |
2554 ASSERT(kShortSize == 2); | 2508 ASSERT(kShortSize == 2); |
2555 // scratch1 = length * 2 + kObjectAlignmentMask. | 2509 // scratch1 = length * 2 + kObjectAlignmentMask. |
2556 lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask + | 2510 lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask)); |
2557 kHeaderAlignment)); | |
2558 and_(scratch1, Immediate(~kObjectAlignmentMask)); | 2511 and_(scratch1, Immediate(~kObjectAlignmentMask)); |
2559 if (kHeaderAlignment > 0) { | |
2560 subq(scratch1, Immediate(kHeaderAlignment)); | |
2561 } | |
2562 | 2512 |
2563 // Allocate two byte string in new space. | 2513 // Allocate two byte string in new space. |
2564 AllocateInNewSpace(SeqTwoByteString::kHeaderSize, | 2514 AllocateInNewSpace(SeqTwoByteString::kHeaderSize, |
2565 times_1, | 2515 times_1, |
2566 scratch1, | 2516 scratch1, |
2567 result, | 2517 result, |
2568 scratch2, | 2518 scratch2, |
2569 scratch3, | 2519 scratch3, |
2570 gc_required, | 2520 gc_required, |
2571 TAG_OBJECT); | 2521 TAG_OBJECT); |
2572 | 2522 |
2573 // Set the map, length and hash field. | 2523 // Set the map, length and hash field. |
2574 LoadRoot(kScratchRegister, Heap::kStringMapRootIndex); | 2524 LoadRoot(kScratchRegister, Heap::kStringMapRootIndex); |
2575 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister); | 2525 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister); |
2576 Integer32ToSmi(scratch1, length); | 2526 movl(FieldOperand(result, String::kLengthOffset), length); |
2577 movq(FieldOperand(result, String::kLengthOffset), scratch1); | |
2578 movl(FieldOperand(result, String::kHashFieldOffset), | 2527 movl(FieldOperand(result, String::kHashFieldOffset), |
2579 Immediate(String::kEmptyHashField)); | 2528 Immediate(String::kEmptyHashField)); |
2580 } | 2529 } |
2581 | 2530 |
2582 | 2531 |
2583 void MacroAssembler::AllocateAsciiString(Register result, | 2532 void MacroAssembler::AllocateAsciiString(Register result, |
2584 Register length, | 2533 Register length, |
2585 Register scratch1, | 2534 Register scratch1, |
2586 Register scratch2, | 2535 Register scratch2, |
2587 Register scratch3, | 2536 Register scratch3, |
2588 Label* gc_required) { | 2537 Label* gc_required) { |
2589 // Calculate the number of bytes needed for the characters in the string while | 2538 // Calculate the number of bytes needed for the characters in the string while |
2590 // observing object alignment. | 2539 // observing object alignment. |
2591 const int kHeaderAlignment = SeqAsciiString::kHeaderSize & | 2540 ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); |
2592 kObjectAlignmentMask; | |
2593 movl(scratch1, length); | 2541 movl(scratch1, length); |
2594 ASSERT(kCharSize == 1); | 2542 ASSERT(kCharSize == 1); |
2595 addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment)); | 2543 addq(scratch1, Immediate(kObjectAlignmentMask)); |
2596 and_(scratch1, Immediate(~kObjectAlignmentMask)); | 2544 and_(scratch1, Immediate(~kObjectAlignmentMask)); |
2597 if (kHeaderAlignment > 0) { | |
2598 subq(scratch1, Immediate(kHeaderAlignment)); | |
2599 } | |
2600 | 2545 |
2601 // Allocate ascii string in new space. | 2546 // Allocate ascii string in new space. |
2602 AllocateInNewSpace(SeqAsciiString::kHeaderSize, | 2547 AllocateInNewSpace(SeqAsciiString::kHeaderSize, |
2603 times_1, | 2548 times_1, |
2604 scratch1, | 2549 scratch1, |
2605 result, | 2550 result, |
2606 scratch2, | 2551 scratch2, |
2607 scratch3, | 2552 scratch3, |
2608 gc_required, | 2553 gc_required, |
2609 TAG_OBJECT); | 2554 TAG_OBJECT); |
2610 | 2555 |
2611 // Set the map, length and hash field. | 2556 // Set the map, length and hash field. |
2612 LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex); | 2557 LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex); |
2613 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister); | 2558 movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister); |
2614 Integer32ToSmi(scratch1, length); | 2559 movl(FieldOperand(result, String::kLengthOffset), length); |
2615 movq(FieldOperand(result, String::kLengthOffset), scratch1); | |
2616 movl(FieldOperand(result, String::kHashFieldOffset), | 2560 movl(FieldOperand(result, String::kHashFieldOffset), |
2617 Immediate(String::kEmptyHashField)); | 2561 Immediate(String::kEmptyHashField)); |
2618 } | 2562 } |
2619 | 2563 |
2620 | 2564 |
2621 void MacroAssembler::AllocateConsString(Register result, | 2565 void MacroAssembler::AllocateConsString(Register result, |
2622 Register scratch1, | 2566 Register scratch1, |
2623 Register scratch2, | 2567 Register scratch2, |
2624 Label* gc_required) { | 2568 Label* gc_required) { |
2625 // Allocate heap number in new space. | 2569 // Allocate heap number in new space. |
(...skipping 109 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2735 CodePatcher::~CodePatcher() { | 2679 CodePatcher::~CodePatcher() { |
2736 // Indicate that code has changed. | 2680 // Indicate that code has changed. |
2737 CPU::FlushICache(address_, size_); | 2681 CPU::FlushICache(address_, size_); |
2738 | 2682 |
2739 // Check that the code was patched as expected. | 2683 // Check that the code was patched as expected. |
2740 ASSERT(masm_.pc_ == address_ + size_); | 2684 ASSERT(masm_.pc_ == address_ + size_); |
2741 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 2685 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
2742 } | 2686 } |
2743 | 2687 |
2744 } } // namespace v8::internal | 2688 } } // namespace v8::internal |
OLD | NEW |