OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #if V8_TARGET_ARCH_MIPS | 7 #if V8_TARGET_ARCH_MIPS |
8 | 8 |
9 #include "src/codegen.h" | 9 #include "src/codegen.h" |
10 #include "src/macro-assembler.h" | 10 #include "src/macro-assembler.h" |
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
50 MathExpGenerator::EmitMathExp( | 50 MathExpGenerator::EmitMathExp( |
51 &masm, input, result, double_scratch1, double_scratch2, | 51 &masm, input, result, double_scratch1, double_scratch2, |
52 temp1, temp2, temp3); | 52 temp1, temp2, temp3); |
53 __ Pop(temp3, temp2, temp1); | 53 __ Pop(temp3, temp2, temp1); |
54 __ MovToFloatResult(result); | 54 __ MovToFloatResult(result); |
55 __ Ret(); | 55 __ Ret(); |
56 } | 56 } |
57 | 57 |
58 CodeDesc desc; | 58 CodeDesc desc; |
59 masm.GetCode(&desc); | 59 masm.GetCode(&desc); |
60 ASSERT(!RelocInfo::RequiresRelocation(desc)); | 60 DCHECK(!RelocInfo::RequiresRelocation(desc)); |
61 | 61 |
62 CpuFeatures::FlushICache(buffer, actual_size); | 62 CpuFeatures::FlushICache(buffer, actual_size); |
63 base::OS::ProtectCode(buffer, actual_size); | 63 base::OS::ProtectCode(buffer, actual_size); |
64 | 64 |
65 #if !defined(USE_SIMULATOR) | 65 #if !defined(USE_SIMULATOR) |
66 return FUNCTION_CAST<UnaryMathFunction>(buffer); | 66 return FUNCTION_CAST<UnaryMathFunction>(buffer); |
67 #else | 67 #else |
68 fast_exp_mips_machine_code = buffer; | 68 fast_exp_mips_machine_code = buffer; |
69 return &fast_exp_simulator; | 69 return &fast_exp_simulator; |
70 #endif | 70 #endif |
(...skipping 20 matching lines...) Expand all Loading... |
91 leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw, | 91 leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw, |
92 ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop; | 92 ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop; |
93 | 93 |
94 // The size of each prefetch. | 94 // The size of each prefetch. |
95 uint32_t pref_chunk = 32; | 95 uint32_t pref_chunk = 32; |
96 // The maximum size of a prefetch, it must not be less than pref_chunk. | 96 // The maximum size of a prefetch, it must not be less than pref_chunk. |
97 // If the real size of a prefetch is greater than max_pref_size and | 97 // If the real size of a prefetch is greater than max_pref_size and |
98 // the kPrefHintPrepareForStore hint is used, the code will not work | 98 // the kPrefHintPrepareForStore hint is used, the code will not work |
99 // correctly. | 99 // correctly. |
100 uint32_t max_pref_size = 128; | 100 uint32_t max_pref_size = 128; |
101 ASSERT(pref_chunk < max_pref_size); | 101 DCHECK(pref_chunk < max_pref_size); |
102 | 102 |
103 // pref_limit is set based on the fact that we never use an offset | 103 // pref_limit is set based on the fact that we never use an offset |
104 // greater than 5 on a store pref and that a single pref can | 104 // greater than 5 on a store pref and that a single pref can |
105 // never be larger than max_pref_size. | 105 // never be larger than max_pref_size. |
106 uint32_t pref_limit = (5 * pref_chunk) + max_pref_size; | 106 uint32_t pref_limit = (5 * pref_chunk) + max_pref_size; |
107 int32_t pref_hint_load = kPrefHintLoadStreamed; | 107 int32_t pref_hint_load = kPrefHintLoadStreamed; |
108 int32_t pref_hint_store = kPrefHintPrepareForStore; | 108 int32_t pref_hint_store = kPrefHintPrepareForStore; |
109 uint32_t loadstore_chunk = 4; | 109 uint32_t loadstore_chunk = 4; |
110 | 110 |
111 // The initial prefetches may fetch bytes that are before the buffer being | 111 // The initial prefetches may fetch bytes that are before the buffer being |
112 // copied. Start copies with an offset of 4 to avoid this situation when | 112 // copied. Start copies with an offset of 4 to avoid this situation when |
113 // using kPrefHintPrepareForStore. | 113 // using kPrefHintPrepareForStore. |
114 ASSERT(pref_hint_store != kPrefHintPrepareForStore || | 114 DCHECK(pref_hint_store != kPrefHintPrepareForStore || |
115 pref_chunk * 4 >= max_pref_size); | 115 pref_chunk * 4 >= max_pref_size); |
116 | 116 |
117 // If the size is less than 8, go to lastb. Regardless of size, | 117 // If the size is less than 8, go to lastb. Regardless of size, |
118 // copy dst pointer to v0 for the return value. | 118 // copy dst pointer to v0 for the return value. |
119 __ slti(t2, a2, 2 * loadstore_chunk); | 119 __ slti(t2, a2, 2 * loadstore_chunk); |
120 __ bne(t2, zero_reg, &lastb); | 120 __ bne(t2, zero_reg, &lastb); |
121 __ mov(v0, a0); // In delay slot. | 121 __ mov(v0, a0); // In delay slot. |
122 | 122 |
123 // If src and dst have different alignments, go to unaligned, if they | 123 // If src and dst have different alignments, go to unaligned, if they |
124 // have the same alignment (but are not actually aligned) do a partial | 124 // have the same alignment (but are not actually aligned) do a partial |
(...skipping 462 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
587 __ addiu(a0, a0, 1); | 587 __ addiu(a0, a0, 1); |
588 __ addiu(a1, a1, 1); | 588 __ addiu(a1, a1, 1); |
589 __ bne(a0, a3, &ua_smallCopy_loop); | 589 __ bne(a0, a3, &ua_smallCopy_loop); |
590 __ sb(v1, MemOperand(a0, -1)); // In delay slot. | 590 __ sb(v1, MemOperand(a0, -1)); // In delay slot. |
591 | 591 |
592 __ jr(ra); | 592 __ jr(ra); |
593 __ nop(); | 593 __ nop(); |
594 } | 594 } |
595 CodeDesc desc; | 595 CodeDesc desc; |
596 masm.GetCode(&desc); | 596 masm.GetCode(&desc); |
597 ASSERT(!RelocInfo::RequiresRelocation(desc)); | 597 DCHECK(!RelocInfo::RequiresRelocation(desc)); |
598 | 598 |
599 CpuFeatures::FlushICache(buffer, actual_size); | 599 CpuFeatures::FlushICache(buffer, actual_size); |
600 base::OS::ProtectCode(buffer, actual_size); | 600 base::OS::ProtectCode(buffer, actual_size); |
601 return FUNCTION_CAST<MemCopyUint8Function>(buffer); | 601 return FUNCTION_CAST<MemCopyUint8Function>(buffer); |
602 #endif | 602 #endif |
603 } | 603 } |
604 #endif | 604 #endif |
605 | 605 |
606 UnaryMathFunction CreateSqrtFunction() { | 606 UnaryMathFunction CreateSqrtFunction() { |
607 #if defined(USE_SIMULATOR) | 607 #if defined(USE_SIMULATOR) |
608 return &std::sqrt; | 608 return &std::sqrt; |
609 #else | 609 #else |
610 size_t actual_size; | 610 size_t actual_size; |
611 byte* buffer = | 611 byte* buffer = |
612 static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); | 612 static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); |
613 if (buffer == NULL) return &std::sqrt; | 613 if (buffer == NULL) return &std::sqrt; |
614 | 614 |
615 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); | 615 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); |
616 | 616 |
617 __ MovFromFloatParameter(f12); | 617 __ MovFromFloatParameter(f12); |
618 __ sqrt_d(f0, f12); | 618 __ sqrt_d(f0, f12); |
619 __ MovToFloatResult(f0); | 619 __ MovToFloatResult(f0); |
620 __ Ret(); | 620 __ Ret(); |
621 | 621 |
622 CodeDesc desc; | 622 CodeDesc desc; |
623 masm.GetCode(&desc); | 623 masm.GetCode(&desc); |
624 ASSERT(!RelocInfo::RequiresRelocation(desc)); | 624 DCHECK(!RelocInfo::RequiresRelocation(desc)); |
625 | 625 |
626 CpuFeatures::FlushICache(buffer, actual_size); | 626 CpuFeatures::FlushICache(buffer, actual_size); |
627 base::OS::ProtectCode(buffer, actual_size); | 627 base::OS::ProtectCode(buffer, actual_size); |
628 return FUNCTION_CAST<UnaryMathFunction>(buffer); | 628 return FUNCTION_CAST<UnaryMathFunction>(buffer); |
629 #endif | 629 #endif |
630 } | 630 } |
631 | 631 |
632 #undef __ | 632 #undef __ |
633 | 633 |
634 | 634 |
635 // ------------------------------------------------------------------------- | 635 // ------------------------------------------------------------------------- |
636 // Platform-specific RuntimeCallHelper functions. | 636 // Platform-specific RuntimeCallHelper functions. |
637 | 637 |
638 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { | 638 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { |
639 masm->EnterFrame(StackFrame::INTERNAL); | 639 masm->EnterFrame(StackFrame::INTERNAL); |
640 ASSERT(!masm->has_frame()); | 640 DCHECK(!masm->has_frame()); |
641 masm->set_has_frame(true); | 641 masm->set_has_frame(true); |
642 } | 642 } |
643 | 643 |
644 | 644 |
645 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { | 645 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { |
646 masm->LeaveFrame(StackFrame::INTERNAL); | 646 masm->LeaveFrame(StackFrame::INTERNAL); |
647 ASSERT(masm->has_frame()); | 647 DCHECK(masm->has_frame()); |
648 masm->set_has_frame(false); | 648 masm->set_has_frame(false); |
649 } | 649 } |
650 | 650 |
651 | 651 |
652 // ------------------------------------------------------------------------- | 652 // ------------------------------------------------------------------------- |
653 // Code generators | 653 // Code generators |
654 | 654 |
655 #define __ ACCESS_MASM(masm) | 655 #define __ ACCESS_MASM(masm) |
656 | 656 |
657 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( | 657 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( |
658 MacroAssembler* masm, | 658 MacroAssembler* masm, |
659 Register receiver, | 659 Register receiver, |
660 Register key, | 660 Register key, |
661 Register value, | 661 Register value, |
662 Register target_map, | 662 Register target_map, |
663 AllocationSiteMode mode, | 663 AllocationSiteMode mode, |
664 Label* allocation_memento_found) { | 664 Label* allocation_memento_found) { |
665 Register scratch_elements = t0; | 665 Register scratch_elements = t0; |
666 ASSERT(!AreAliased(receiver, key, value, target_map, | 666 DCHECK(!AreAliased(receiver, key, value, target_map, |
667 scratch_elements)); | 667 scratch_elements)); |
668 | 668 |
669 if (mode == TRACK_ALLOCATION_SITE) { | 669 if (mode == TRACK_ALLOCATION_SITE) { |
670 ASSERT(allocation_memento_found != NULL); | 670 DCHECK(allocation_memento_found != NULL); |
671 __ JumpIfJSArrayHasAllocationMemento( | 671 __ JumpIfJSArrayHasAllocationMemento( |
672 receiver, scratch_elements, allocation_memento_found); | 672 receiver, scratch_elements, allocation_memento_found); |
673 } | 673 } |
674 | 674 |
675 // Set transitioned map. | 675 // Set transitioned map. |
676 __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 676 __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
677 __ RecordWriteField(receiver, | 677 __ RecordWriteField(receiver, |
678 HeapObject::kMapOffset, | 678 HeapObject::kMapOffset, |
679 target_map, | 679 target_map, |
680 t5, | 680 t5, |
(...skipping 18 matching lines...) Expand all Loading... |
699 Register length = t1; | 699 Register length = t1; |
700 Register array = t2; | 700 Register array = t2; |
701 Register array_end = array; | 701 Register array_end = array; |
702 | 702 |
703 // target_map parameter can be clobbered. | 703 // target_map parameter can be clobbered. |
704 Register scratch1 = target_map; | 704 Register scratch1 = target_map; |
705 Register scratch2 = t5; | 705 Register scratch2 = t5; |
706 Register scratch3 = t3; | 706 Register scratch3 = t3; |
707 | 707 |
708 // Verify input registers don't conflict with locals. | 708 // Verify input registers don't conflict with locals. |
709 ASSERT(!AreAliased(receiver, key, value, target_map, | 709 DCHECK(!AreAliased(receiver, key, value, target_map, |
710 elements, length, array, scratch2)); | 710 elements, length, array, scratch2)); |
711 | 711 |
712 Register scratch = t6; | 712 Register scratch = t6; |
713 | 713 |
714 if (mode == TRACK_ALLOCATION_SITE) { | 714 if (mode == TRACK_ALLOCATION_SITE) { |
715 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); | 715 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); |
716 } | 716 } |
717 | 717 |
718 // Check for empty arrays, which only require a map transition and no changes | 718 // Check for empty arrays, which only require a map transition and no changes |
719 // to the backing store. | 719 // to the backing store. |
(...skipping 124 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
844 AllocationSiteMode mode, | 844 AllocationSiteMode mode, |
845 Label* fail) { | 845 Label* fail) { |
846 // Register ra contains the return address. | 846 // Register ra contains the return address. |
847 Label entry, loop, convert_hole, gc_required, only_change_map; | 847 Label entry, loop, convert_hole, gc_required, only_change_map; |
848 Register elements = t0; | 848 Register elements = t0; |
849 Register array = t2; | 849 Register array = t2; |
850 Register length = t1; | 850 Register length = t1; |
851 Register scratch = t5; | 851 Register scratch = t5; |
852 | 852 |
853 // Verify input registers don't conflict with locals. | 853 // Verify input registers don't conflict with locals. |
854 ASSERT(!AreAliased(receiver, key, value, target_map, | 854 DCHECK(!AreAliased(receiver, key, value, target_map, |
855 elements, array, length, scratch)); | 855 elements, array, length, scratch)); |
856 | 856 |
857 if (mode == TRACK_ALLOCATION_SITE) { | 857 if (mode == TRACK_ALLOCATION_SITE) { |
858 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); | 858 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); |
859 } | 859 } |
860 | 860 |
861 // Check for empty arrays, which only require a map transition and no changes | 861 // Check for empty arrays, which only require a map transition and no changes |
862 // to the backing store. | 862 // to the backing store. |
863 __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 863 __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
864 __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex); | 864 __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex); |
(...skipping 217 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1082 | 1082 |
1083 | 1083 |
1084 void MathExpGenerator::EmitMathExp(MacroAssembler* masm, | 1084 void MathExpGenerator::EmitMathExp(MacroAssembler* masm, |
1085 DoubleRegister input, | 1085 DoubleRegister input, |
1086 DoubleRegister result, | 1086 DoubleRegister result, |
1087 DoubleRegister double_scratch1, | 1087 DoubleRegister double_scratch1, |
1088 DoubleRegister double_scratch2, | 1088 DoubleRegister double_scratch2, |
1089 Register temp1, | 1089 Register temp1, |
1090 Register temp2, | 1090 Register temp2, |
1091 Register temp3) { | 1091 Register temp3) { |
1092 ASSERT(!input.is(result)); | 1092 DCHECK(!input.is(result)); |
1093 ASSERT(!input.is(double_scratch1)); | 1093 DCHECK(!input.is(double_scratch1)); |
1094 ASSERT(!input.is(double_scratch2)); | 1094 DCHECK(!input.is(double_scratch2)); |
1095 ASSERT(!result.is(double_scratch1)); | 1095 DCHECK(!result.is(double_scratch1)); |
1096 ASSERT(!result.is(double_scratch2)); | 1096 DCHECK(!result.is(double_scratch2)); |
1097 ASSERT(!double_scratch1.is(double_scratch2)); | 1097 DCHECK(!double_scratch1.is(double_scratch2)); |
1098 ASSERT(!temp1.is(temp2)); | 1098 DCHECK(!temp1.is(temp2)); |
1099 ASSERT(!temp1.is(temp3)); | 1099 DCHECK(!temp1.is(temp3)); |
1100 ASSERT(!temp2.is(temp3)); | 1100 DCHECK(!temp2.is(temp3)); |
1101 ASSERT(ExternalReference::math_exp_constants(0).address() != NULL); | 1101 DCHECK(ExternalReference::math_exp_constants(0).address() != NULL); |
1102 | 1102 |
1103 Label zero, infinity, done; | 1103 Label zero, infinity, done; |
1104 | 1104 |
1105 __ li(temp3, Operand(ExternalReference::math_exp_constants(0))); | 1105 __ li(temp3, Operand(ExternalReference::math_exp_constants(0))); |
1106 | 1106 |
1107 __ ldc1(double_scratch1, ExpConstant(0, temp3)); | 1107 __ ldc1(double_scratch1, ExpConstant(0, temp3)); |
1108 __ BranchF(&zero, NULL, ge, double_scratch1, input); | 1108 __ BranchF(&zero, NULL, ge, double_scratch1, input); |
1109 | 1109 |
1110 __ ldc1(double_scratch2, ExpConstant(1, temp3)); | 1110 __ ldc1(double_scratch2, ExpConstant(1, temp3)); |
1111 __ BranchF(&infinity, NULL, ge, input, double_scratch2); | 1111 __ BranchF(&infinity, NULL, ge, input, double_scratch2); |
1112 | 1112 |
1113 __ ldc1(double_scratch1, ExpConstant(3, temp3)); | 1113 __ ldc1(double_scratch1, ExpConstant(3, temp3)); |
1114 __ ldc1(result, ExpConstant(4, temp3)); | 1114 __ ldc1(result, ExpConstant(4, temp3)); |
1115 __ mul_d(double_scratch1, double_scratch1, input); | 1115 __ mul_d(double_scratch1, double_scratch1, input); |
1116 __ add_d(double_scratch1, double_scratch1, result); | 1116 __ add_d(double_scratch1, double_scratch1, result); |
1117 __ FmoveLow(temp2, double_scratch1); | 1117 __ FmoveLow(temp2, double_scratch1); |
1118 __ sub_d(double_scratch1, double_scratch1, result); | 1118 __ sub_d(double_scratch1, double_scratch1, result); |
1119 __ ldc1(result, ExpConstant(6, temp3)); | 1119 __ ldc1(result, ExpConstant(6, temp3)); |
1120 __ ldc1(double_scratch2, ExpConstant(5, temp3)); | 1120 __ ldc1(double_scratch2, ExpConstant(5, temp3)); |
1121 __ mul_d(double_scratch1, double_scratch1, double_scratch2); | 1121 __ mul_d(double_scratch1, double_scratch1, double_scratch2); |
1122 __ sub_d(double_scratch1, double_scratch1, input); | 1122 __ sub_d(double_scratch1, double_scratch1, input); |
1123 __ sub_d(result, result, double_scratch1); | 1123 __ sub_d(result, result, double_scratch1); |
1124 __ mul_d(double_scratch2, double_scratch1, double_scratch1); | 1124 __ mul_d(double_scratch2, double_scratch1, double_scratch1); |
1125 __ mul_d(result, result, double_scratch2); | 1125 __ mul_d(result, result, double_scratch2); |
1126 __ ldc1(double_scratch2, ExpConstant(7, temp3)); | 1126 __ ldc1(double_scratch2, ExpConstant(7, temp3)); |
1127 __ mul_d(result, result, double_scratch2); | 1127 __ mul_d(result, result, double_scratch2); |
1128 __ sub_d(result, result, double_scratch1); | 1128 __ sub_d(result, result, double_scratch1); |
1129 // Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1. | 1129 // Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1. |
1130 ASSERT(*reinterpret_cast<double*> | 1130 DCHECK(*reinterpret_cast<double*> |
1131 (ExternalReference::math_exp_constants(8).address()) == 1); | 1131 (ExternalReference::math_exp_constants(8).address()) == 1); |
1132 __ Move(double_scratch2, 1); | 1132 __ Move(double_scratch2, 1); |
1133 __ add_d(result, result, double_scratch2); | 1133 __ add_d(result, result, double_scratch2); |
1134 __ srl(temp1, temp2, 11); | 1134 __ srl(temp1, temp2, 11); |
1135 __ Ext(temp2, temp2, 0, 11); | 1135 __ Ext(temp2, temp2, 0, 11); |
1136 __ Addu(temp1, temp1, Operand(0x3ff)); | 1136 __ Addu(temp1, temp1, Operand(0x3ff)); |
1137 | 1137 |
1138 // Must not call ExpConstant() after overwriting temp3! | 1138 // Must not call ExpConstant() after overwriting temp3! |
1139 __ li(temp3, Operand(ExternalReference::math_exp_log_table())); | 1139 __ li(temp3, Operand(ExternalReference::math_exp_log_table())); |
1140 __ sll(at, temp2, 3); | 1140 __ sll(at, temp2, 3); |
(...skipping 23 matching lines...) Expand all Loading... |
1164 __ bind(&done); | 1164 __ bind(&done); |
1165 } | 1165 } |
1166 | 1166 |
1167 #ifdef DEBUG | 1167 #ifdef DEBUG |
1168 // nop(CODE_AGE_MARKER_NOP) | 1168 // nop(CODE_AGE_MARKER_NOP) |
1169 static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180; | 1169 static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180; |
1170 #endif | 1170 #endif |
1171 | 1171 |
1172 | 1172 |
1173 CodeAgingHelper::CodeAgingHelper() { | 1173 CodeAgingHelper::CodeAgingHelper() { |
1174 ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength); | 1174 DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength); |
1175 // Since patcher is a large object, allocate it dynamically when needed, | 1175 // Since patcher is a large object, allocate it dynamically when needed, |
1176 // to avoid overloading the stack in stress conditions. | 1176 // to avoid overloading the stack in stress conditions. |
1177 // DONT_FLUSH is used because the CodeAgingHelper is initialized early in | 1177 // DONT_FLUSH is used because the CodeAgingHelper is initialized early in |
1178 // the process, before MIPS simulator ICache is setup. | 1178 // the process, before MIPS simulator ICache is setup. |
1179 SmartPointer<CodePatcher> patcher( | 1179 SmartPointer<CodePatcher> patcher( |
1180 new CodePatcher(young_sequence_.start(), | 1180 new CodePatcher(young_sequence_.start(), |
1181 young_sequence_.length() / Assembler::kInstrSize, | 1181 young_sequence_.length() / Assembler::kInstrSize, |
1182 CodePatcher::DONT_FLUSH)); | 1182 CodePatcher::DONT_FLUSH)); |
1183 PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length()); | 1183 PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length()); |
1184 patcher->masm()->Push(ra, fp, cp, a1); | 1184 patcher->masm()->Push(ra, fp, cp, a1); |
1185 patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP); | 1185 patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP); |
1186 patcher->masm()->Addu( | 1186 patcher->masm()->Addu( |
1187 fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); | 1187 fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); |
1188 } | 1188 } |
1189 | 1189 |
1190 | 1190 |
1191 #ifdef DEBUG | 1191 #ifdef DEBUG |
1192 bool CodeAgingHelper::IsOld(byte* candidate) const { | 1192 bool CodeAgingHelper::IsOld(byte* candidate) const { |
1193 return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction; | 1193 return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction; |
1194 } | 1194 } |
1195 #endif | 1195 #endif |
1196 | 1196 |
1197 | 1197 |
1198 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) { | 1198 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) { |
1199 bool result = isolate->code_aging_helper()->IsYoung(sequence); | 1199 bool result = isolate->code_aging_helper()->IsYoung(sequence); |
1200 ASSERT(result || isolate->code_aging_helper()->IsOld(sequence)); | 1200 DCHECK(result || isolate->code_aging_helper()->IsOld(sequence)); |
1201 return result; | 1201 return result; |
1202 } | 1202 } |
1203 | 1203 |
1204 | 1204 |
1205 void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age, | 1205 void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age, |
1206 MarkingParity* parity) { | 1206 MarkingParity* parity) { |
1207 if (IsYoungSequence(isolate, sequence)) { | 1207 if (IsYoungSequence(isolate, sequence)) { |
1208 *age = kNoAgeCodeAge; | 1208 *age = kNoAgeCodeAge; |
1209 *parity = NO_MARKING_PARITY; | 1209 *parity = NO_MARKING_PARITY; |
1210 } else { | 1210 } else { |
(...skipping 30 matching lines...) Expand all Loading... |
1241 patcher.masm()->nop(); // Pad the empty space. | 1241 patcher.masm()->nop(); // Pad the empty space. |
1242 } | 1242 } |
1243 } | 1243 } |
1244 | 1244 |
1245 | 1245 |
1246 #undef __ | 1246 #undef __ |
1247 | 1247 |
1248 } } // namespace v8::internal | 1248 } } // namespace v8::internal |
1249 | 1249 |
1250 #endif // V8_TARGET_ARCH_MIPS | 1250 #endif // V8_TARGET_ARCH_MIPS |
OLD | NEW |