OLD | NEW |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" | 5 #include "vm/globals.h" |
6 #if defined(TARGET_ARCH_MIPS) | 6 #if defined(TARGET_ARCH_MIPS) |
7 | 7 |
8 #include "vm/assembler.h" | 8 #include "vm/assembler.h" |
9 #include "vm/code_generator.h" | 9 #include "vm/code_generator.h" |
10 #include "vm/compiler.h" | 10 #include "vm/compiler.h" |
11 #include "vm/dart_entry.h" | 11 #include "vm/dart_entry.h" |
12 #include "vm/flow_graph_compiler.h" | 12 #include "vm/flow_graph_compiler.h" |
13 #include "vm/heap.h" | 13 #include "vm/heap.h" |
14 #include "vm/instructions.h" | 14 #include "vm/instructions.h" |
15 #include "vm/object_store.h" | 15 #include "vm/object_store.h" |
16 #include "vm/stack_frame.h" | 16 #include "vm/stack_frame.h" |
17 #include "vm/stub_code.h" | 17 #include "vm/stub_code.h" |
18 #include "vm/tags.h" | 18 #include "vm/tags.h" |
19 | 19 |
20 #define __ assembler-> | 20 #define __ assembler-> |
21 | 21 |
22 namespace dart { | 22 namespace dart { |
23 | 23 |
24 DEFINE_FLAG(bool, inline_alloc, true, "Inline allocation of objects."); | 24 DEFINE_FLAG(bool, inline_alloc, true, "Inline allocation of objects."); |
25 DEFINE_FLAG(bool, use_slow_path, false, | 25 DEFINE_FLAG(bool, |
26 "Set to true for debugging & verifying the slow paths."); | 26 use_slow_path, |
| 27 false, |
| 28 "Set to true for debugging & verifying the slow paths."); |
27 DECLARE_FLAG(bool, trace_optimized_ic_calls); | 29 DECLARE_FLAG(bool, trace_optimized_ic_calls); |
28 | 30 |
29 // Input parameters: | 31 // Input parameters: |
30 // RA : return address. | 32 // RA : return address. |
31 // SP : address of last argument in argument array. | 33 // SP : address of last argument in argument array. |
32 // SP + 4*S4 - 4 : address of first argument in argument array. | 34 // SP + 4*S4 - 4 : address of first argument in argument array. |
33 // SP + 4*S4 : address of return value. | 35 // SP + 4*S4 : address of return value. |
34 // S5 : address of the runtime function to call. | 36 // S5 : address of the runtime function to call. |
35 // S4 : number of arguments to the call. | 37 // S4 : number of arguments to the call. |
36 void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) { | 38 void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) { |
37 const intptr_t thread_offset = NativeArguments::thread_offset(); | 39 const intptr_t thread_offset = NativeArguments::thread_offset(); |
38 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); | 40 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); |
39 const intptr_t argv_offset = NativeArguments::argv_offset(); | 41 const intptr_t argv_offset = NativeArguments::argv_offset(); |
40 const intptr_t retval_offset = NativeArguments::retval_offset(); | 42 const intptr_t retval_offset = NativeArguments::retval_offset(); |
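For reference: the four offsets above describe consecutive words of the NativeArguments block this stub materializes on the C stack. A minimal sketch of that layout, assuming the field order implied here (only retval's slot is asserted further down, at retval_offset == 3 * kWordSize):

  struct NativeArgumentsSketch {  // hypothetical mirror, not the VM's declaration
    Thread* thread;      // thread_offset()   -> slot 0 (assumed)
    intptr_t argc_tag;   // argc_tag_offset() -> slot 1 (assumed)
    RawObject** argv;    // argv_offset()     -> slot 2 (assumed)
    RawObject** retval;  // retval_offset()   -> slot 3 (asserted below)
  };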
41 | 43 |
42 __ SetPrologueOffset(); | 44 __ SetPrologueOffset(); |
43 __ Comment("CallToRuntimeStub"); | 45 __ Comment("CallToRuntimeStub"); |
44 __ EnterStubFrame(); | 46 __ EnterStubFrame(); |
45 | 47 |
46 // Save exit frame information to enable stack walking as we are about | 48 // Save exit frame information to enable stack walking as we are about |
47 // to transition to Dart VM C++ code. | 49 // to transition to Dart VM C++ code. |
48 __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset())); | 50 __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset())); |
49 | 51 |
50 #if defined(DEBUG) | 52 #if defined(DEBUG) |
51 { Label ok; | 53 { |
| 54 Label ok; |
52 // Check that we are always entering from Dart code. | 55 // Check that we are always entering from Dart code. |
53 __ lw(T0, Assembler::VMTagAddress()); | 56 __ lw(T0, Assembler::VMTagAddress()); |
54 __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok); | 57 __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok); |
55 __ Stop("Not coming from Dart code."); | 58 __ Stop("Not coming from Dart code."); |
56 __ Bind(&ok); | 59 __ Bind(&ok); |
57 } | 60 } |
58 #endif | 61 #endif |
59 | 62 |
60 // Mark that the thread is executing VM code. | 63 // Mark that the thread is executing VM code. |
61 __ sw(S5, Assembler::VMTagAddress()); | 64 __ sw(S5, Assembler::VMTagAddress()); |
(...skipping 21 matching lines...) |

83 // Set argv in NativeArguments. | 86 // Set argv in NativeArguments. |
84 __ addiu(A2, A2, Immediate(kParamEndSlotFromFp * kWordSize)); | 87 __ addiu(A2, A2, Immediate(kParamEndSlotFromFp * kWordSize)); |
85 | 88 |
86 | 89 |
87 // Call runtime or redirection via simulator. | 90 // Call runtime or redirection via simulator. |
88 // We defensively always jalr through T9 because it is sometimes required by | 91 // We defensively always jalr through T9 because it is sometimes required by |
89 // the MIPS ABI. | 92 // the MIPS ABI. |
90 __ mov(T9, S5); | 93 __ mov(T9, S5); |
91 __ jalr(T9); | 94 __ jalr(T9); |
92 | 95 |
93 ASSERT(retval_offset == 3 * kWordSize); | 96 ASSERT(retval_offset == 3 * kWordSize); |
94 // Retval is next to 1st argument. | 97 // Retval is next to 1st argument. |
95 __ delay_slot()->addiu(A3, A2, Immediate(kWordSize)); | 98 __ delay_slot()->addiu(A3, A2, Immediate(kWordSize)); |
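A note on delay_slot(), used here and throughout this file: MIPS executes the instruction that follows a branch or jump before control actually transfers, and the assembler exposes that slot explicitly. The effective order of the call above, as an illustrative sketch:

  // effective execution order (sketch, not real code):
  // A3 = A2 + kWordSize;  -- the delay-slot addiu runs first
  // goto *T9;             -- then the jalr target (the runtime entry) is entered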
96 __ Comment("CallToRuntimeStub return"); | 99 __ Comment("CallToRuntimeStub return"); |
97 | 100 |
98 // Mark that the thread is executing Dart code. | 101 // Mark that the thread is executing Dart code. |
99 __ LoadImmediate(A2, VMTag::kDartTagId); | 102 __ LoadImmediate(A2, VMTag::kDartTagId); |
100 __ sw(A2, Assembler::VMTagAddress()); | 103 __ sw(A2, Assembler::VMTagAddress()); |
101 | 104 |
102 // Reset exit frame information in Thread structure. | 105 // Reset exit frame information in Thread structure. |
103 __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset())); | 106 __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset())); |
(...skipping 35 matching lines...) |
139 | 142 |
140 __ SetPrologueOffset(); | 143 __ SetPrologueOffset(); |
141 __ Comment("CallNativeCFunctionStub"); | 144 __ Comment("CallNativeCFunctionStub"); |
142 __ EnterStubFrame(); | 145 __ EnterStubFrame(); |
143 | 146 |
144 // Save exit frame information to enable stack walking as we are about | 147 // Save exit frame information to enable stack walking as we are about |
145 // to transition to native code. | 148 // to transition to native code. |
146 __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset())); | 149 __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset())); |
147 | 150 |
148 #if defined(DEBUG) | 151 #if defined(DEBUG) |
149 { Label ok; | 152 { |
| 153 Label ok; |
150 // Check that we are always entering from Dart code. | 154 // Check that we are always entering from Dart code. |
151 __ lw(T0, Assembler::VMTagAddress()); | 155 __ lw(T0, Assembler::VMTagAddress()); |
152 __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok); | 156 __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok); |
153 __ Stop("Not coming from Dart code."); | 157 __ Stop("Not coming from Dart code."); |
154 __ Bind(&ok); | 158 __ Bind(&ok); |
155 } | 159 } |
156 #endif | 160 #endif |
157 | 161 |
158 // Mark that the thread is executing native code. | 162 // Mark that the thread is executing native code. |
159 __ sw(T5, Assembler::VMTagAddress()); | 163 __ sw(T5, Assembler::VMTagAddress()); |
(...skipping 21 matching lines...) |
181 // Dart API for native functions. | 185 // Dart API for native functions. |
182 // For now, space is reserved on the stack and we pass a pointer to it. | 186 // For now, space is reserved on the stack and we pass a pointer to it. |
183 __ addiu(SP, SP, Immediate(-4 * kWordSize)); | 187 __ addiu(SP, SP, Immediate(-4 * kWordSize)); |
184 __ sw(A3, Address(SP, 3 * kWordSize)); | 188 __ sw(A3, Address(SP, 3 * kWordSize)); |
185 __ sw(A2, Address(SP, 2 * kWordSize)); | 189 __ sw(A2, Address(SP, 2 * kWordSize)); |
186 __ sw(A1, Address(SP, 1 * kWordSize)); | 190 __ sw(A1, Address(SP, 1 * kWordSize)); |
187 __ sw(A0, Address(SP, 0 * kWordSize)); | 191 __ sw(A0, Address(SP, 0 * kWordSize)); |
188 __ mov(A0, SP); // Pass the pointer to the NativeArguments. | 192 __ mov(A0, SP); // Pass the pointer to the NativeArguments. |
189 | 193 |
190 | 194 |
191 __ mov(A1, T5); // Pass the function entrypoint. | 195 __ mov(A1, T5); // Pass the function entrypoint. |
192 __ ReserveAlignedFrameSpace(2 * kWordSize); // Just passing A0, A1. | 196 __ ReserveAlignedFrameSpace(2 * kWordSize); // Just passing A0, A1. |
193 | 197 |
194 // Call native wrapper function or redirection via simulator. | 198 // Call native wrapper function or redirection via simulator. |
195 __ lw(T9, Address(THR, Thread::native_call_wrapper_entry_point_offset())); | 199 __ lw(T9, Address(THR, Thread::native_call_wrapper_entry_point_offset())); |
196 __ jalr(T9); | 200 __ jalr(T9); |
197 __ Comment("CallNativeCFunctionStub return"); | 201 __ Comment("CallNativeCFunctionStub return"); |
198 | 202 |
199 // Mark that the thread is executing Dart code. | 203 // Mark that the thread is executing Dart code. |
200 __ LoadImmediate(A2, VMTag::kDartTagId); | 204 __ LoadImmediate(A2, VMTag::kDartTagId); |
201 __ sw(A2, Assembler::VMTagAddress()); | 205 __ sw(A2, Assembler::VMTagAddress()); |
(...skipping 19 matching lines...) |
221 | 225 |
222 __ SetPrologueOffset(); | 226 __ SetPrologueOffset(); |
223 __ Comment("CallNativeCFunctionStub"); | 227 __ Comment("CallNativeCFunctionStub"); |
224 __ EnterStubFrame(); | 228 __ EnterStubFrame(); |
225 | 229 |
226 // Save exit frame information to enable stack walking as we are about | 230 // Save exit frame information to enable stack walking as we are about |
227 // to transition to native code. | 231 // to transition to native code. |
228 __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset())); | 232 __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset())); |
229 | 233 |
230 #if defined(DEBUG) | 234 #if defined(DEBUG) |
231 { Label ok; | 235 { |
| 236 Label ok; |
232 // Check that we are always entering from Dart code. | 237 // Check that we are always entering from Dart code. |
233 __ lw(T0, Assembler::VMTagAddress()); | 238 __ lw(T0, Assembler::VMTagAddress()); |
234 __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok); | 239 __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok); |
235 __ Stop("Not coming from Dart code."); | 240 __ Stop("Not coming from Dart code."); |
236 __ Bind(&ok); | 241 __ Bind(&ok); |
237 } | 242 } |
238 #endif | 243 #endif |
239 | 244 |
240 // Mark that the thread is executing native code. | 245 // Mark that the thread is executing native code. |
241 __ sw(T5, Assembler::VMTagAddress()); | 246 __ sw(T5, Assembler::VMTagAddress()); |
(...skipping 224 matching lines...) |
466 } | 471 } |
467 } | 472 } |
468 for (int i = 0; i < kNumberOfFRegisters; i++) { | 473 for (int i = 0; i < kNumberOfFRegisters; i++) { |
469 // These go below the CPU registers. | 474 // These go below the CPU registers. |
470 const int slot = kNumberOfCpuRegisters + kNumberOfFRegisters - i; | 475 const int slot = kNumberOfCpuRegisters + kNumberOfFRegisters - i; |
471 FRegister reg = static_cast<FRegister>(i); | 476 FRegister reg = static_cast<FRegister>(i); |
472 __ swc1(reg, Address(SP, kPushedRegistersSize - slot * kWordSize)); | 477 __ swc1(reg, Address(SP, kPushedRegistersSize - slot * kWordSize)); |
473 } | 478 } |
474 | 479 |
475 __ mov(A0, SP); // Pass address of saved registers block. | 480 __ mov(A0, SP); // Pass address of saved registers block. |
476 bool is_lazy = (kind == kLazyDeoptFromReturn) || | 481 bool is_lazy = |
477 (kind == kLazyDeoptFromThrow); | 482 (kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow); |
478 __ LoadImmediate(A1, is_lazy ? 1 : 0); | 483 __ LoadImmediate(A1, is_lazy ? 1 : 0); |
479 __ ReserveAlignedFrameSpace(1 * kWordSize); | 484 __ ReserveAlignedFrameSpace(1 * kWordSize); |
480 __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2); | 485 __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2); |
481 // Result (V0) is stack-size (FP - SP) in bytes, incl. the return address. | 486 // Result (V0) is stack-size (FP - SP) in bytes, incl. the return address. |
482 | 487 |
483 if (kind == kLazyDeoptFromReturn) { | 488 if (kind == kLazyDeoptFromReturn) { |
484 // Restore result into T1 temporarily. | 489 // Restore result into T1 temporarily. |
485 __ lw(T1, Address(FP, saved_result_slot_from_fp * kWordSize)); | 490 __ lw(T1, Address(FP, saved_result_slot_from_fp * kWordSize)); |
486 } else if (kind == kLazyDeoptFromThrow) { | 491 } else if (kind == kLazyDeoptFromThrow) { |
487 // Restore result into T1 temporarily. | 492 // Restore result into T1 temporarily. |
(...skipping 208 matching lines...) |
696 __ LoadImmediate(T3, ~(kObjectAlignment - 1)); | 701 __ LoadImmediate(T3, ~(kObjectAlignment - 1)); |
697 __ and_(T2, T2, T3); | 702 __ and_(T2, T2, T3); |
698 | 703 |
699 // T2: Allocation size. | 704 // T2: Allocation size. |
700 | 705 |
701 Heap::Space space = Heap::kNew; | 706 Heap::Space space = Heap::kNew; |
702 __ lw(T3, Address(THR, Thread::heap_offset())); | 707 __ lw(T3, Address(THR, Thread::heap_offset())); |
703 // Potential new object start. | 708 // Potential new object start. |
704 __ lw(T0, Address(T3, Heap::TopOffset(space))); | 709 __ lw(T0, Address(T3, Heap::TopOffset(space))); |
705 | 710 |
706 __ addu(T1, T0, T2); // Potential next object start. | 711 __ addu(T1, T0, T2); // Potential next object start. |
707 __ BranchUnsignedLess(T1, T0, &slow_case); // Branch on unsigned overflow. | 712 __ BranchUnsignedLess(T1, T0, &slow_case); // Branch on unsigned overflow. |
708 | 713 |
709 // Check if the allocation fits into the remaining space. | 714 // Check if the allocation fits into the remaining space. |
710 // T0: potential new object start. | 715 // T0: potential new object start. |
711 // T1: potential next object start. | 716 // T1: potential next object start. |
712 // T2: allocation size. | 717 // T2: allocation size. |
713 // T3: heap. | 718 // T3: heap. |
714 __ lw(T4, Address(T3, Heap::EndOffset(space))); | 719 __ lw(T4, Address(T3, Heap::EndOffset(space))); |
715 __ BranchUnsignedGreaterEqual(T1, T4, &slow_case); | 720 __ BranchUnsignedGreaterEqual(T1, T4, &slow_case); |
716 | 721 |
717 // Successfully allocated the object(s); now update top to point to | 722 // Successfully allocated the object(s); now update top to point to |
718 // next object start and initialize the object. | 723 // next object start and initialize the object. |
719 // T3: heap. | 724 // T3: heap. |
720 __ sw(T1, Address(T3, Heap::TopOffset(space))); | 725 __ sw(T1, Address(T3, Heap::TopOffset(space))); |
721 __ addiu(T0, T0, Immediate(kHeapObjectTag)); | 726 __ addiu(T0, T0, Immediate(kHeapObjectTag)); |
722 NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, T2, T4, space)); | 727 NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, T2, T4, space)); |
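The fast path above is a bump allocation; a minimal C++ sketch of the same logic, assuming illustrative Top()/End()/SetTop() accessors standing in for the TopOffset/EndOffset loads and stores:

  uword TryAllocateBump(Heap* heap, intptr_t size, Heap::Space space) {
    uword top = heap->Top(space);            // T0: potential new object start
    uword next = top + size;                 // T1: potential next object start
    if (next < top) return 0;                // unsigned overflow -> slow case
    if (next >= heap->End(space)) return 0;  // does not fit -> slow case
    heap->SetTop(space, next);               // commit the allocation
    return top + kHeapObjectTag;             // tagged pointer to the new object
  }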
723 | 728 |
724 // Initialize the tags. | 729 // Initialize the tags. |
725 // T0: new object start as a tagged pointer. | 730 // T0: new object start as a tagged pointer. |
726 // T1: new object end address. | 731 // T1: new object end address. |
727 // T2: allocation size. | 732 // T2: allocation size. |
728 { | 733 { |
729 Label overflow, done; | 734 Label overflow, done; |
730 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; | 735 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; |
731 | 736 |
732 __ BranchUnsignedGreater( | 737 __ BranchUnsignedGreater(T2, Immediate(RawObject::SizeTag::kMaxSizeTag), |
733 T2, Immediate(RawObject::SizeTag::kMaxSizeTag), &overflow); | 738 &overflow); |
734 __ b(&done); | 739 __ b(&done); |
735 __ delay_slot()->sll(T2, T2, shift); | 740 __ delay_slot()->sll(T2, T2, shift); |
736 __ Bind(&overflow); | 741 __ Bind(&overflow); |
737 __ mov(T2, ZR); | 742 __ mov(T2, ZR); |
738 __ Bind(&done); | 743 __ Bind(&done); |
739 | 744 |
740 // Get the class index and insert it into the tags. | 745 // Get the class index and insert it into the tags. |
741 // T2: size and bit tags. | 746 // T2: size and bit tags. |
742 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); | 747 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); |
743 __ or_(T2, T2, TMP); | 748 __ or_(T2, T2, TMP); |
744 __ sw(T2, FieldAddress(T0, Array::tags_offset())); // Store tags. | 749 __ sw(T2, FieldAddress(T0, Array::tags_offset())); // Store tags. |
745 } | 750 } |
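In plain C++, the block above computes the tags word as the aligned size shifted into the size-tag bits, zeroed when the size exceeds the encodable maximum, OR-ed with the class id; a sketch using the same constants:

  uword tags = 0;
  if (size <= RawObject::SizeTag::kMaxSizeTag) {
    tags = size << (RawObject::kSizeTagPos - kObjectAlignmentLog2);
  }  // else the size bits stay 0 (presumably recovered from the length field)
  tags |= RawObject::ClassIdTag::encode(cid);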
746 | 751 |
747 // T0: new object start as a tagged pointer. | 752 // T0: new object start as a tagged pointer. |
748 // T1: new object end address. | 753 // T1: new object end address. |
749 // Store the type argument field. | 754 // Store the type argument field. |
750 __ StoreIntoObjectNoBarrier(T0, | 755 __ StoreIntoObjectNoBarrier( |
751 FieldAddress(T0, Array::type_arguments_offset()), | 756 T0, FieldAddress(T0, Array::type_arguments_offset()), A0); |
752 A0); | |
753 | 757 |
754 // Set the length field. | 758 // Set the length field. |
755 __ StoreIntoObjectNoBarrier(T0, | 759 __ StoreIntoObjectNoBarrier(T0, FieldAddress(T0, Array::length_offset()), A1); |
756 FieldAddress(T0, Array::length_offset()), | |
757 A1); | |
758 | 760 |
759 __ LoadObject(T7, Object::null_object()); | 761 __ LoadObject(T7, Object::null_object()); |
760 // Initialize all array elements to raw_null. | 762 // Initialize all array elements to raw_null. |
761 // T0: new object start as a tagged pointer. | 763 // T0: new object start as a tagged pointer. |
762 // T1: new object end address. | 764 // T1: new object end address. |
763 // T2: iterator which initially points to the start of the variable | 765 // T2: iterator which initially points to the start of the variable |
764 // data area to be initialized. | 766 // data area to be initialized. |
765 // T7: null. | 767 // T7: null. |
766 __ AddImmediate(T2, T0, sizeof(RawArray) - kHeapObjectTag); | 768 __ AddImmediate(T2, T0, sizeof(RawArray) - kHeapObjectTag); |
767 | 769 |
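The initialization loop that follows (its body falls in the elided lines below) simply fills every element slot with null; a hedged sketch with illustrative names:

  // fill [first element, object end) with raw null; no write barrier is
  // needed while the object is still being initialized
  for (RawObject** slot = data_start; slot < object_end; slot++) {
    *slot = null;
  }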
(...skipping 47 matching lines...) |
815 | 817 |
816 // Push code object to PC marker slot. | 818 // Push code object to PC marker slot. |
817 __ lw(TMP, Address(A3, Thread::invoke_dart_code_stub_offset())); | 819 __ lw(TMP, Address(A3, Thread::invoke_dart_code_stub_offset())); |
818 __ Push(TMP); | 820 __ Push(TMP); |
819 | 821 |
820 // Save new context and C++ ABI callee-saved registers. | 822 // Save new context and C++ ABI callee-saved registers. |
821 | 823 |
822 // The saved vm tag, top resource, and top exit frame info. | 824 // The saved vm tag, top resource, and top exit frame info. |
823 const intptr_t kPreservedSlots = 3; | 825 const intptr_t kPreservedSlots = 3; |
824 const intptr_t kPreservedRegSpace = | 826 const intptr_t kPreservedRegSpace = |
825 kWordSize * (kAbiPreservedCpuRegCount + kAbiPreservedFpuRegCount + | 827 kWordSize * |
826 kPreservedSlots); | 828 (kAbiPreservedCpuRegCount + kAbiPreservedFpuRegCount + kPreservedSlots); |
827 | 829 |
828 __ addiu(SP, SP, Immediate(-kPreservedRegSpace)); | 830 __ addiu(SP, SP, Immediate(-kPreservedRegSpace)); |
829 for (int i = S0; i <= S7; i++) { | 831 for (int i = S0; i <= S7; i++) { |
830 Register r = static_cast<Register>(i); | 832 Register r = static_cast<Register>(i); |
831 const intptr_t slot = i - S0 + kPreservedSlots; | 833 const intptr_t slot = i - S0 + kPreservedSlots; |
832 __ sw(r, Address(SP, slot * kWordSize)); | 834 __ sw(r, Address(SP, slot * kWordSize)); |
833 } | 835 } |
834 | 836 |
835 for (intptr_t i = kAbiFirstPreservedFpuReg; | 837 for (intptr_t i = kAbiFirstPreservedFpuReg; i <= kAbiLastPreservedFpuReg; |
836 i <= kAbiLastPreservedFpuReg; i++) { | 838 i++) { |
837 FRegister r = static_cast<FRegister>(i); | 839 FRegister r = static_cast<FRegister>(i); |
838 const intptr_t slot = | 840 const intptr_t slot = kAbiPreservedCpuRegCount + kPreservedSlots + i - |
839 kAbiPreservedCpuRegCount + kPreservedSlots + i - | 841 kAbiFirstPreservedFpuReg; |
840 kAbiFirstPreservedFpuReg; | |
841 __ swc1(r, Address(SP, slot * kWordSize)); | 842 __ swc1(r, Address(SP, slot * kWordSize)); |
842 } | 843 } |
843 | 844 |
844 // We now load the pool pointer(PP) with a GC safe value as we are about | 845 // We now load the pool pointer(PP) with a GC safe value as we are about |
845 // to invoke dart code. | 846 // to invoke dart code. |
846 __ LoadImmediate(PP, 0); | 847 __ LoadImmediate(PP, 0); |
847 | 848 |
848 // Set up THR, which caches the current thread in Dart code. | 849 // Set up THR, which caches the current thread in Dart code. |
849 if (THR != A3) { | 850 if (THR != A3) { |
850 __ mov(THR, A3); | 851 __ mov(THR, A3); |
(...skipping 70 matching lines...) |
921 __ lw(T0, Address(SP, 0 * kWordSize)); | 922 __ lw(T0, Address(SP, 0 * kWordSize)); |
922 __ sw(T0, Address(THR, Thread::top_exit_frame_info_offset())); | 923 __ sw(T0, Address(THR, Thread::top_exit_frame_info_offset())); |
923 | 924 |
924 // Restore C++ ABI callee-saved registers. | 925 // Restore C++ ABI callee-saved registers. |
925 for (int i = S0; i <= S7; i++) { | 926 for (int i = S0; i <= S7; i++) { |
926 Register r = static_cast<Register>(i); | 927 Register r = static_cast<Register>(i); |
927 const intptr_t slot = i - S0 + kPreservedSlots; | 928 const intptr_t slot = i - S0 + kPreservedSlots; |
928 __ lw(r, Address(SP, slot * kWordSize)); | 929 __ lw(r, Address(SP, slot * kWordSize)); |
929 } | 930 } |
930 | 931 |
931 for (intptr_t i = kAbiFirstPreservedFpuReg; | 932 for (intptr_t i = kAbiFirstPreservedFpuReg; i <= kAbiLastPreservedFpuReg; |
932 i <= kAbiLastPreservedFpuReg; i++) { | 933 i++) { |
933 FRegister r = static_cast<FRegister>(i); | 934 FRegister r = static_cast<FRegister>(i); |
934 const intptr_t slot = | 935 const intptr_t slot = kAbiPreservedCpuRegCount + kPreservedSlots + i - |
935 kAbiPreservedCpuRegCount + kPreservedSlots + i - | 936 kAbiFirstPreservedFpuReg; |
936 kAbiFirstPreservedFpuReg; | |
937 __ lwc1(r, Address(SP, slot * kWordSize)); | 937 __ lwc1(r, Address(SP, slot * kWordSize)); |
938 } | 938 } |
939 | 939 |
940 __ addiu(SP, SP, Immediate(kPreservedRegSpace)); | 940 __ addiu(SP, SP, Immediate(kPreservedRegSpace)); |
941 | 941 |
942 // Restore the frame pointer and return. | 942 // Restore the frame pointer and return. |
943 __ LeaveFrameAndReturn(); | 943 __ LeaveFrameAndReturn(); |
944 } | 944 } |
945 | 945 |
946 | 946 |
947 // Called for inline allocation of contexts. | 947 // Called for inline allocation of contexts. |
948 // Input: | 948 // Input: |
949 // T1: number of context variables. | 949 // T1: number of context variables. |
950 // Output: | 950 // Output: |
951 // V0: newly allocated RawContext object. | 951 // V0: newly allocated RawContext object. |
952 void StubCode::GenerateAllocateContextStub(Assembler* assembler) { | 952 void StubCode::GenerateAllocateContextStub(Assembler* assembler) { |
953 __ Comment("AllocateContext"); | 953 __ Comment("AllocateContext"); |
954 if (FLAG_inline_alloc) { | 954 if (FLAG_inline_alloc) { |
955 Label slow_case; | 955 Label slow_case; |
956 // First compute the rounded instance size. | 956 // First compute the rounded instance size. |
957 // T1: number of context variables. | 957 // T1: number of context variables. |
958 intptr_t fixed_size = sizeof(RawContext) + kObjectAlignment - 1; | 958 intptr_t fixed_size = sizeof(RawContext) + kObjectAlignment - 1; |
959 __ LoadImmediate(T2, fixed_size); | 959 __ LoadImmediate(T2, fixed_size); |
960 __ sll(T0, T1, 2); | 960 __ sll(T0, T1, 2); |
961 __ addu(T2, T2, T0); | 961 __ addu(T2, T2, T0); |
962 ASSERT(kSmiTagShift == 1); | 962 ASSERT(kSmiTagShift == 1); |
963 __ LoadImmediate(T0, ~((kObjectAlignment) - 1)); | 963 __ LoadImmediate(T0, ~((kObjectAlignment)-1)); |
964 __ and_(T2, T2, T0); | 964 __ and_(T2, T2, T0); |
965 | 965 |
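Written out as C++, the size computed above (T1 holds the untagged variable count here; it is only Smi-tagged later, on the slow path):

  const intptr_t size =
      (sizeof(RawContext) + num_context_variables * kWordSize +
       kObjectAlignment - 1) & ~(kObjectAlignment - 1);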
966 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, T4, &slow_case)); | 966 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, T4, &slow_case)); |
967 // Now allocate the object. | 967 // Now allocate the object. |
968 // T1: number of context variables. | 968 // T1: number of context variables. |
969 // T2: object size. | 969 // T2: object size. |
970 const intptr_t cid = kContextCid; | 970 const intptr_t cid = kContextCid; |
971 Heap::Space space = Heap::kNew; | 971 Heap::Space space = Heap::kNew; |
972 __ lw(T5, Address(THR, Thread::heap_offset())); | 972 __ lw(T5, Address(THR, Thread::heap_offset())); |
973 __ lw(V0, Address(T5, Heap::TopOffset(space))); | 973 __ lw(V0, Address(T5, Heap::TopOffset(space))); |
(...skipping 23 matching lines...) |
997 __ addiu(V0, V0, Immediate(kHeapObjectTag)); | 997 __ addiu(V0, V0, Immediate(kHeapObjectTag)); |
998 NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, T2, T5, space)); | 998 NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, T2, T5, space)); |
999 | 999 |
1000 // Calculate the size tag. | 1000 // Calculate the size tag. |
1001 // V0: new object. | 1001 // V0: new object. |
1002 // T1: number of context variables. | 1002 // T1: number of context variables. |
1003 // T2: object size. | 1003 // T2: object size. |
1004 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; | 1004 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; |
1005 __ LoadImmediate(TMP, RawObject::SizeTag::kMaxSizeTag); | 1005 __ LoadImmediate(TMP, RawObject::SizeTag::kMaxSizeTag); |
1006 __ sltu(CMPRES1, TMP, T2); // CMPRES1 = T2 > TMP ? 1 : 0. | 1006 __ sltu(CMPRES1, TMP, T2); // CMPRES1 = T2 > TMP ? 1 : 0. |
1007 __ movn(T2, ZR, CMPRES1); // T2 = CMPRES1 != 0 ? 0 : T2. | 1007 __ movn(T2, ZR, CMPRES1); // T2 = CMPRES1 != 0 ? 0 : T2. |
1008 __ sll(TMP, T2, shift); // TMP = T2 << shift. | 1008 __ sll(TMP, T2, shift); // TMP = T2 << shift. |
1009 __ movz(T2, TMP, CMPRES1); // T2 = CMPRES1 == 0 ? TMP : T2. | 1009 __ movz(T2, TMP, CMPRES1); // T2 = CMPRES1 == 0 ? TMP : T2. |
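The movn/movz pair forms a branchless select, matching the branch-based version used in the array stub above; the net effect as C++:

  // T2 = (T2 > kMaxSizeTag) ? 0 : (T2 << shift);
  const uword size_tag =
      (size > RawObject::SizeTag::kMaxSizeTag) ? 0 : (size << shift);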
1010 | 1010 |
1011 // Get the class index and insert it into the tags. | 1011 // Get the class index and insert it into the tags. |
1012 // T2: size and bit tags. | 1012 // T2: size and bit tags. |
1013 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); | 1013 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); |
1014 __ or_(T2, T2, TMP); | 1014 __ or_(T2, T2, TMP); |
1015 __ sw(T2, FieldAddress(V0, Context::tags_offset())); | 1015 __ sw(T2, FieldAddress(V0, Context::tags_offset())); |
1016 | 1016 |
1017 // Set up number of context variables field. | 1017 // Set up number of context variables field. |
1018 // V0: new object. | 1018 // V0: new object. |
(...skipping 27 matching lines...) |
1046 // Create a stub frame as we are pushing some objects on the stack before | 1046 // Create a stub frame as we are pushing some objects on the stack before |
1047 // calling into the runtime. | 1047 // calling into the runtime. |
1048 __ EnterStubFrame(); | 1048 __ EnterStubFrame(); |
1049 // Set up space on stack for return value. | 1049 // Set up space on stack for return value. |
1050 __ SmiTag(T1); | 1050 __ SmiTag(T1); |
1051 __ addiu(SP, SP, Immediate(-2 * kWordSize)); | 1051 __ addiu(SP, SP, Immediate(-2 * kWordSize)); |
1052 __ LoadObject(TMP, Object::null_object()); | 1052 __ LoadObject(TMP, Object::null_object()); |
1053 __ sw(TMP, Address(SP, 1 * kWordSize)); // Store null. | 1053 __ sw(TMP, Address(SP, 1 * kWordSize)); // Store null. |
1054 __ sw(T1, Address(SP, 0 * kWordSize)); | 1054 __ sw(T1, Address(SP, 0 * kWordSize)); |
1055 __ CallRuntime(kAllocateContextRuntimeEntry, 1); // Allocate context. | 1055 __ CallRuntime(kAllocateContextRuntimeEntry, 1); // Allocate context. |
1056 __ lw(V0, Address(SP, 1 * kWordSize)); // Get the new context. | 1056 __ lw(V0, Address(SP, 1 * kWordSize)); // Get the new context. |
1057 __ addiu(SP, SP, Immediate(2 * kWordSize)); // Pop argument and return. | 1057 __ addiu(SP, SP, Immediate(2 * kWordSize)); // Pop argument and return. |
1058 | 1058 |
1059 // V0: new object | 1059 // V0: new object |
1060 // Restore the frame pointer. | 1060 // Restore the frame pointer. |
1061 __ LeaveStubFrameAndReturn(); | 1061 __ LeaveStubFrameAndReturn(); |
1062 } | 1062 } |
1063 | 1063 |
1064 | 1064 |
1065 // Helper stub to implement Assembler::StoreIntoObject. | 1065 // Helper stub to implement Assembler::StoreIntoObject. |
1066 // Input parameters: | 1066 // Input parameters: |
1067 // T0: Address (i.e. object) being stored into. | 1067 // T0: Address (i.e. object) being stored into. |
(...skipping 129 matching lines...) |
1197 | 1197 |
1198 // Initialize the remaining words of the object. | 1198 // Initialize the remaining words of the object. |
1199 // T2: new object start. | 1199 // T2: new object start. |
1200 // T3: next object start. | 1200 // T3: next object start. |
1201 // T1: new object type arguments (if is_cls_parameterized). | 1201 // T1: new object type arguments (if is_cls_parameterized). |
1202 // First try inlining the initialization without a loop. | 1202 // First try inlining the initialization without a loop. |
1203 if (instance_size < (kInlineInstanceSize * kWordSize)) { | 1203 if (instance_size < (kInlineInstanceSize * kWordSize)) { |
1204 // Check if the object contains any non-header fields. | 1204 // Check if the object contains any non-header fields. |
1205 // Small objects are initialized using a consecutive set of writes. | 1205 // Small objects are initialized using a consecutive set of writes. |
1206 for (intptr_t current_offset = Instance::NextFieldOffset(); | 1206 for (intptr_t current_offset = Instance::NextFieldOffset(); |
1207 current_offset < instance_size; | 1207 current_offset < instance_size; current_offset += kWordSize) { |
1208 current_offset += kWordSize) { | |
1209 __ sw(T7, Address(T2, current_offset)); | 1208 __ sw(T7, Address(T2, current_offset)); |
1210 } | 1209 } |
1211 } else { | 1210 } else { |
1212 __ addiu(T4, T2, Immediate(Instance::NextFieldOffset())); | 1211 __ addiu(T4, T2, Immediate(Instance::NextFieldOffset())); |
1213 // Loop until the whole object is initialized. | 1212 // Loop until the whole object is initialized. |
1214 // T2: new object. | 1213 // T2: new object. |
1215 // T3: next object start. | 1214 // T3: next object start. |
1216 // T4: next word to be initialized. | 1215 // T4: next word to be initialized. |
1217 // T1: new object type arguments (if is_cls_parameterized). | 1216 // T1: new object type arguments (if is_cls_parameterized). |
1218 Label loop, loop_exit; | 1217 Label loop, loop_exit; |
(...skipping 87 matching lines...) |
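A compact sketch of the two initialization strategies chosen above (the runtime loop's body sits in the elided lines): small instances get stores unrolled at stub-generation time, larger ones a runtime loop:

  if (instance_size < kInlineInstanceSize * kWordSize) {
    // unrolled at stub-generation time: one sw per field
    for (intptr_t off = Instance::NextFieldOffset(); off < instance_size;
         off += kWordSize) {
      *reinterpret_cast<RawObject**>(obj_start + off) = null;
    }
  } else {
    // runtime loop: T4 walks from the first field up to T3 (next object start)
    for (uword p = obj_start + Instance::NextFieldOffset(); p < next_start;
         p += kWordSize) {
      *reinterpret_cast<RawObject**>(p) = null;
    }
  }

(obj_start, next_start, and null are illustrative names for T2, T3, and T7.)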
1306 // function and not the top-scope function. | 1305 // function and not the top-scope function. |
1307 void StubCode::GenerateOptimizedUsageCounterIncrement(Assembler* assembler) { | 1306 void StubCode::GenerateOptimizedUsageCounterIncrement(Assembler* assembler) { |
1308 __ Comment("OptimizedUsageCounterIncrement"); | 1307 __ Comment("OptimizedUsageCounterIncrement"); |
1309 Register ic_reg = S5; | 1308 Register ic_reg = S5; |
1310 Register func_reg = T0; | 1309 Register func_reg = T0; |
1311 if (FLAG_trace_optimized_ic_calls) { | 1310 if (FLAG_trace_optimized_ic_calls) { |
1312 __ EnterStubFrame(); | 1311 __ EnterStubFrame(); |
1313 __ addiu(SP, SP, Immediate(-4 * kWordSize)); | 1312 __ addiu(SP, SP, Immediate(-4 * kWordSize)); |
1314 __ sw(T0, Address(SP, 3 * kWordSize)); | 1313 __ sw(T0, Address(SP, 3 * kWordSize)); |
1315 __ sw(S5, Address(SP, 2 * kWordSize)); | 1314 __ sw(S5, Address(SP, 2 * kWordSize)); |
1316 __ sw(ic_reg, Address(SP, 1 * kWordSize)); // Argument. | 1315 __ sw(ic_reg, Address(SP, 1 * kWordSize)); // Argument. |
1317 __ sw(func_reg, Address(SP, 0 * kWordSize)); // Argument. | 1316 __ sw(func_reg, Address(SP, 0 * kWordSize)); // Argument. |
1318 __ CallRuntime(kTraceICCallRuntimeEntry, 2); | 1317 __ CallRuntime(kTraceICCallRuntimeEntry, 2); |
1319 __ lw(S5, Address(SP, 2 * kWordSize)); | 1318 __ lw(S5, Address(SP, 2 * kWordSize)); |
1320 __ lw(T0, Address(SP, 3 * kWordSize)); | 1319 __ lw(T0, Address(SP, 3 * kWordSize)); |
1321 __ addiu(SP, SP, Immediate(4 * kWordSize)); // Discard arguments. | 1320 __ addiu(SP, SP, Immediate(4 * kWordSize)); // Discard arguments. |
1322 __ LeaveStubFrame(); | 1321 __ LeaveStubFrame(); |
1323 } | 1322 } |
1324 __ lw(T7, FieldAddress(func_reg, Function::usage_counter_offset())); | 1323 __ lw(T7, FieldAddress(func_reg, Function::usage_counter_offset())); |
1325 __ addiu(T7, T7, Immediate(1)); | 1324 __ addiu(T7, T7, Immediate(1)); |
1326 __ sw(T7, FieldAddress(func_reg, Function::usage_counter_offset())); | 1325 __ sw(T7, FieldAddress(func_reg, Function::usage_counter_offset())); |
(...skipping 27 matching lines...) Expand all Loading... |
1354 Label* not_smi_or_overflow) { | 1353 Label* not_smi_or_overflow) { |
1355 __ Comment("Fast Smi op"); | 1354 __ Comment("Fast Smi op"); |
1356 ASSERT(num_args == 2); | 1355 ASSERT(num_args == 2); |
1357 __ lw(T0, Address(SP, 0 * kWordSize)); // Left. | 1356 __ lw(T0, Address(SP, 0 * kWordSize)); // Left. |
1358 __ lw(T1, Address(SP, 1 * kWordSize)); // Right. | 1357 __ lw(T1, Address(SP, 1 * kWordSize)); // Right. |
1359 __ or_(CMPRES1, T0, T1); | 1358 __ or_(CMPRES1, T0, T1); |
1360 __ andi(CMPRES1, CMPRES1, Immediate(kSmiTagMask)); | 1359 __ andi(CMPRES1, CMPRES1, Immediate(kSmiTagMask)); |
1361 __ bne(CMPRES1, ZR, not_smi_or_overflow); | 1360 __ bne(CMPRES1, ZR, not_smi_or_overflow); |
1362 switch (kind) { | 1361 switch (kind) { |
1363 case Token::kADD: { | 1362 case Token::kADD: { |
1364 __ AdduDetectOverflow(V0, T1, T0, CMPRES1); // Add. | 1363 __ AdduDetectOverflow(V0, T1, T0, CMPRES1); // Add. |
1365 __ bltz(CMPRES1, not_smi_or_overflow); // Fall through on overflow. | 1364 __ bltz(CMPRES1, not_smi_or_overflow); // Fall through on overflow. |
1366 break; | 1365 break; |
1367 } | 1366 } |
1368 case Token::kSUB: { | 1367 case Token::kSUB: { |
1369 __ SubuDetectOverflow(V0, T1, T0, CMPRES1); // Subtract. | 1368 __ SubuDetectOverflow(V0, T1, T0, CMPRES1); // Subtract. |
1370 __ bltz(CMPRES1, not_smi_or_overflow); // Fall through on overflow. | 1369 __ bltz(CMPRES1, not_smi_or_overflow); // Fall through on overflow. |
1371 break; | 1370 break; |
1372 } | 1371 } |
1373 case Token::kEQ: { | 1372 case Token::kEQ: { |
1374 Label true_label, done; | 1373 Label true_label, done; |
1375 __ beq(T1, T0, &true_label); | 1374 __ beq(T1, T0, &true_label); |
1376 __ LoadObject(V0, Bool::False()); | 1375 __ LoadObject(V0, Bool::False()); |
1377 __ b(&done); | 1376 __ b(&done); |
1378 __ Bind(&true_label); | 1377 __ Bind(&true_label); |
1379 __ LoadObject(V0, Bool::True()); | 1378 __ LoadObject(V0, Bool::True()); |
1380 __ Bind(&done); | 1379 __ Bind(&done); |
1381 break; | 1380 break; |
1382 } | 1381 } |
1383 default: UNIMPLEMENTED(); | 1382 default: |
| 1383 UNIMPLEMENTED(); |
1384 } | 1384 } |
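The guard at the top of this helper tests both operands' Smi tags with a single OR and mask; as C++:

  bool BothSmis(intptr_t left, intptr_t right) {
    // a Smi has its tag bit clear, so OR-ing the operands exposes any set tag
    return ((left | right) & kSmiTagMask) == 0;
  }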
1385 // S5: IC data object (preserved). | 1385 // S5: IC data object (preserved). |
1386 __ lw(T0, FieldAddress(S5, ICData::ic_data_offset())); | 1386 __ lw(T0, FieldAddress(S5, ICData::ic_data_offset())); |
1387 // T0: ic_data_array with check entries: classes and target functions. | 1387 // T0: ic_data_array with check entries: classes and target functions. |
1388 __ AddImmediate(T0, Array::data_offset() - kHeapObjectTag); | 1388 __ AddImmediate(T0, Array::data_offset() - kHeapObjectTag); |
1389 // T0: points directly to the first ic data array element. | 1389 // T0: points directly to the first ic data array element. |
1390 #if defined(DEBUG) | 1390 #if defined(DEBUG) |
1391 // Check that first entry is for Smi/Smi. | 1391 // Check that first entry is for Smi/Smi. |
1392 Label error, ok; | 1392 Label error, ok; |
1393 const int32_t imm_smi_cid = reinterpret_cast<int32_t>(Smi::New(kSmiCid)); | 1393 const int32_t imm_smi_cid = reinterpret_cast<int32_t>(Smi::New(kSmiCid)); |
1394 __ lw(T4, Address(T0)); | 1394 __ lw(T4, Address(T0)); |
1395 __ BranchNotEqual(T4, Immediate(imm_smi_cid), &error); | 1395 __ BranchNotEqual(T4, Immediate(imm_smi_cid), &error); |
1396 __ lw(T4, Address(T0, kWordSize)); | 1396 __ lw(T4, Address(T0, kWordSize)); |
1397 __ BranchEqual(T4, Immediate(imm_smi_cid), &ok); | 1397 __ BranchEqual(T4, Immediate(imm_smi_cid), &ok); |
1398 __ Bind(&error); | 1398 __ Bind(&error); |
1399 __ Stop("Incorrect IC data"); | 1399 __ Stop("Incorrect IC data"); |
(...skipping 26 matching lines...) |
1426 // - Match not found -> jump to IC miss. | 1426 // - Match not found -> jump to IC miss. |
1427 void StubCode::GenerateNArgsCheckInlineCacheStub( | 1427 void StubCode::GenerateNArgsCheckInlineCacheStub( |
1428 Assembler* assembler, | 1428 Assembler* assembler, |
1429 intptr_t num_args, | 1429 intptr_t num_args, |
1430 const RuntimeEntry& handle_ic_miss, | 1430 const RuntimeEntry& handle_ic_miss, |
1431 Token::Kind kind, | 1431 Token::Kind kind, |
1432 bool optimized) { | 1432 bool optimized) { |
1433 __ Comment("NArgsCheckInlineCacheStub"); | 1433 __ Comment("NArgsCheckInlineCacheStub"); |
1434 ASSERT(num_args > 0); | 1434 ASSERT(num_args > 0); |
1435 #if defined(DEBUG) | 1435 #if defined(DEBUG) |
1436 { Label ok; | 1436 { |
| 1437 Label ok; |
1437 // Check that the IC data array has NumArgsTested() == num_args. | 1438 // Check that the IC data array has NumArgsTested() == num_args. |
1438 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'. | 1439 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'. |
1439 __ lw(T0, FieldAddress(S5, ICData::state_bits_offset())); | 1440 __ lw(T0, FieldAddress(S5, ICData::state_bits_offset())); |
1440 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed. | 1441 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed. |
1441 __ andi(T0, T0, Immediate(ICData::NumArgsTestedMask())); | 1442 __ andi(T0, T0, Immediate(ICData::NumArgsTestedMask())); |
1442 __ BranchEqual(T0, Immediate(num_args), &ok); | 1443 __ BranchEqual(T0, Immediate(num_args), &ok); |
1443 __ Stop("Incorrect stub for IC data"); | 1444 __ Stop("Incorrect stub for IC data"); |
1444 __ Bind(&ok); | 1445 __ Bind(&ok); |
1445 } | 1446 } |
1446 #endif // DEBUG | 1447 #endif // DEBUG |
(...skipping 56 matching lines...) |
1503 // T3: next argument class ID (smi). | 1504 // T3: next argument class ID (smi). |
1504 __ lw(T4, Address(T0, i * kWordSize)); | 1505 __ lw(T4, Address(T0, i * kWordSize)); |
1505 // T4: next class ID to check (smi). | 1506 // T4: next class ID to check (smi). |
1506 } | 1507 } |
1507 if (i < (num_args - 1)) { | 1508 if (i < (num_args - 1)) { |
1508 __ bne(T3, T4, &update); // Continue. | 1509 __ bne(T3, T4, &update); // Continue. |
1509 } else { | 1510 } else { |
1510 // Last check, all checks before matched. | 1511 // Last check, all checks before matched. |
1511 Label skip; | 1512 Label skip; |
1512 __ bne(T3, T4, &skip); | 1513 __ bne(T3, T4, &skip); |
1513 __ b(&found); // Break. | 1514 __ b(&found); // Break. |
1514 __ delay_slot()->mov(RA, T2); // Restore return address if found. | 1515 __ delay_slot()->mov(RA, T2); // Restore return address if found. |
1515 __ Bind(&skip); | 1516 __ Bind(&skip); |
1516 } | 1517 } |
1517 } | 1518 } |
1518 __ Bind(&update); | 1519 __ Bind(&update); |
1519 // Reload receiver class ID. It has not been destroyed when num_args == 1. | 1520 // Reload receiver class ID. It has not been destroyed when num_args == 1. |
1520 if (num_args > 1) { | 1521 if (num_args > 1) { |
1521 __ sll(T3, T1, 1); | 1522 __ sll(T3, T1, 1); |
1522 __ addu(T3, T3, SP); | 1523 __ addu(T3, T3, SP); |
1523 __ lw(T3, Address(T3)); | 1524 __ lw(T3, Address(T3)); |
1524 __ LoadTaggedClassIdMayBeSmi(T3, T3); | 1525 __ LoadTaggedClassIdMayBeSmi(T3, T3); |
1525 } | 1526 } |
1526 | 1527 |
1527 const intptr_t entry_size = ICData::TestEntryLengthFor(num_args) * kWordSize; | 1528 const intptr_t entry_size = ICData::TestEntryLengthFor(num_args) * kWordSize; |
1528 __ AddImmediate(T0, entry_size); // Next entry. | 1529 __ AddImmediate(T0, entry_size); // Next entry. |
1529 __ lw(T4, Address(T0)); // Next class ID. | 1530 __ lw(T4, Address(T0)); // Next class ID. |
1530 | 1531 |
1531 __ Bind(&test); | 1532 __ Bind(&test); |
1532 __ BranchNotEqual(T4, Immediate(Smi::RawValue(kIllegalCid)), &loop); // Done? | 1533 __ BranchNotEqual(T4, Immediate(Smi::RawValue(kIllegalCid)), &loop); // Done? |
1533 | 1534 |
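The loop above performs a linear scan of the IC data array; a hedged sketch with illustrative names (the kIllegalCid sentinel that the test below relies on terminates the array):

  intptr_t* entry = ic_data_start;                     // first check entry
  while (entry[0] != Smi::RawValue(kIllegalCid)) {     // sentinel -> IC miss
    bool match = true;
    for (intptr_t i = 0; i < num_args; i++) {
      match = match && (entry[i] == arg_class_id[i]);  // Smi-tagged cids
    }
    if (match) break;                                  // found: target follows the cids
    entry += ICData::TestEntryLengthFor(num_args);     // advance to next entry
  }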
1534 __ Comment("IC miss"); | 1535 __ Comment("IC miss"); |
1535 // Restore return address. | 1536 // Restore return address. |
1536 __ mov(RA, T2); | 1537 __ mov(RA, T2); |
1537 | 1538 |
1538 // Compute address of arguments (first read number of arguments from | 1539 // Compute address of arguments (first read number of arguments from |
1539 // arguments descriptor array and then compute address on the stack). | 1540 // arguments descriptor array and then compute address on the stack). |
(...skipping 88 matching lines...) |
1628 // RA: Return address. | 1629 // RA: Return address. |
1629 // S5: Inline cache data object. | 1630 // S5: Inline cache data object. |
1630 // Inline cache data object structure: | 1631 // Inline cache data object structure: |
1631 // 0: function-name | 1632 // 0: function-name |
1632 // 1: N, number of arguments checked. | 1633 // 1: N, number of arguments checked. |
1633 // 2 .. (length - 1): group of checks, each check containing: | 1634 // 2 .. (length - 1): group of checks, each check containing: |
1634 // - N classes. | 1635 // - N classes. |
1635 // - 1 target function. | 1636 // - 1 target function. |
1636 void StubCode::GenerateOneArgCheckInlineCacheStub(Assembler* assembler) { | 1637 void StubCode::GenerateOneArgCheckInlineCacheStub(Assembler* assembler) { |
1637 GenerateUsageCounterIncrement(assembler, T0); | 1638 GenerateUsageCounterIncrement(assembler, T0); |
1638 GenerateNArgsCheckInlineCacheStub(assembler, 1, | 1639 GenerateNArgsCheckInlineCacheStub( |
1639 kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL); | 1640 assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL); |
1640 } | 1641 } |
1641 | 1642 |
1642 | 1643 |
1643 void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) { | 1644 void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) { |
1644 GenerateUsageCounterIncrement(assembler, T0); | 1645 GenerateUsageCounterIncrement(assembler, T0); |
1645 GenerateNArgsCheckInlineCacheStub(assembler, 2, | 1646 GenerateNArgsCheckInlineCacheStub(assembler, 2, |
1646 kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL); | 1647 kInlineCacheMissHandlerTwoArgsRuntimeEntry, |
| 1648 Token::kILLEGAL); |
1647 } | 1649 } |
1648 | 1650 |
1649 | 1651 |
1650 void StubCode::GenerateSmiAddInlineCacheStub(Assembler* assembler) { | 1652 void StubCode::GenerateSmiAddInlineCacheStub(Assembler* assembler) { |
1651 GenerateUsageCounterIncrement(assembler, T0); | 1653 GenerateUsageCounterIncrement(assembler, T0); |
1652 GenerateNArgsCheckInlineCacheStub(assembler, 2, | 1654 GenerateNArgsCheckInlineCacheStub( |
1653 kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD); | 1655 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD); |
1654 } | 1656 } |
1655 | 1657 |
1656 | 1658 |
1657 void StubCode::GenerateSmiSubInlineCacheStub(Assembler* assembler) { | 1659 void StubCode::GenerateSmiSubInlineCacheStub(Assembler* assembler) { |
1658 GenerateUsageCounterIncrement(assembler, T0); | 1660 GenerateUsageCounterIncrement(assembler, T0); |
1659 GenerateNArgsCheckInlineCacheStub(assembler, 2, | 1661 GenerateNArgsCheckInlineCacheStub( |
1660 kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kSUB); | 1662 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kSUB); |
1661 } | 1663 } |
1662 | 1664 |
1663 | 1665 |
1664 void StubCode::GenerateSmiEqualInlineCacheStub(Assembler* assembler) { | 1666 void StubCode::GenerateSmiEqualInlineCacheStub(Assembler* assembler) { |
1665 GenerateUsageCounterIncrement(assembler, T0); | 1667 GenerateUsageCounterIncrement(assembler, T0); |
1666 GenerateNArgsCheckInlineCacheStub(assembler, 2, | 1668 GenerateNArgsCheckInlineCacheStub( |
1667 kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ); | 1669 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ); |
1668 } | 1670 } |
1669 | 1671 |
1670 | 1672 |
1671 void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub( | 1673 void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub( |
1672 Assembler* assembler) { | 1674 Assembler* assembler) { |
1673 GenerateOptimizedUsageCounterIncrement(assembler); | 1675 GenerateOptimizedUsageCounterIncrement(assembler); |
1674 GenerateNArgsCheckInlineCacheStub(assembler, 1, | 1676 GenerateNArgsCheckInlineCacheStub(assembler, 1, |
1675 kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, | 1677 kInlineCacheMissHandlerOneArgRuntimeEntry, |
1676 true /* optimized */); | 1678 Token::kILLEGAL, true /* optimized */); |
1677 } | 1679 } |
1678 | 1680 |
1679 | 1681 |
1680 void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub( | 1682 void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub( |
1681 Assembler* assembler) { | 1683 Assembler* assembler) { |
1682 GenerateOptimizedUsageCounterIncrement(assembler); | 1684 GenerateOptimizedUsageCounterIncrement(assembler); |
1683 GenerateNArgsCheckInlineCacheStub(assembler, 2, | 1685 GenerateNArgsCheckInlineCacheStub(assembler, 2, |
1684 kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL, | 1686 kInlineCacheMissHandlerTwoArgsRuntimeEntry, |
1685 true /* optimized */); | 1687 Token::kILLEGAL, true /* optimized */); |
1686 } | 1688 } |
1687 | 1689 |
1688 | 1690 |
1689 // Intermediary stub between a static call and its target. ICData contains | 1691 // Intermediary stub between a static call and its target. ICData contains |
1690 // the target function and the call count. | 1692 // the target function and the call count. |
1691 // S5: ICData | 1693 // S5: ICData |
1692 void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) { | 1694 void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) { |
1693 GenerateUsageCounterIncrement(assembler, T0); | 1695 GenerateUsageCounterIncrement(assembler, T0); |
1694 __ Comment("UnoptimizedStaticCallStub"); | 1696 __ Comment("UnoptimizedStaticCallStub"); |
1695 #if defined(DEBUG) | 1697 #if defined(DEBUG) |
1696 { Label ok; | 1698 { |
| 1699 Label ok; |
1697 // Check that the IC data array has NumArgsTested() == 0. | 1700 // Check that the IC data array has NumArgsTested() == 0. |
1698 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'. | 1701 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'. |
1699 __ lw(T0, FieldAddress(S5, ICData::state_bits_offset())); | 1702 __ lw(T0, FieldAddress(S5, ICData::state_bits_offset())); |
1700 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed. | 1703 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed. |
1701 __ andi(T0, T0, Immediate(ICData::NumArgsTestedMask())); | 1704 __ andi(T0, T0, Immediate(ICData::NumArgsTestedMask())); |
1702 __ beq(T0, ZR, &ok); | 1705 __ beq(T0, ZR, &ok); |
1703 __ Stop("Incorrect IC data for unoptimized static call"); | 1706 __ Stop("Incorrect IC data for unoptimized static call"); |
1704 __ Bind(&ok); | 1707 __ Bind(&ok); |
1705 } | 1708 } |
1706 #endif // DEBUG | 1709 #endif // DEBUG |
(...skipping 19 matching lines...) |
1726 // Increment count for this call. | 1729 // Increment count for this call. |
1727 __ lw(T4, Address(T0, count_offset)); | 1730 __ lw(T4, Address(T0, count_offset)); |
1728 __ AddImmediateDetectOverflow(T7, T4, Smi::RawValue(1), T5, T6); | 1731 __ AddImmediateDetectOverflow(T7, T4, Smi::RawValue(1), T5, T6); |
1729 __ slt(CMPRES1, T5, ZR); // T5 is < 0 if there was overflow. | 1732 __ slt(CMPRES1, T5, ZR); // T5 is < 0 if there was overflow. |
1730 __ LoadImmediate(T4, Smi::RawValue(Smi::kMaxValue)); | 1733 __ LoadImmediate(T4, Smi::RawValue(Smi::kMaxValue)); |
1731 __ movz(T4, T7, CMPRES1); | 1734 __ movz(T4, T7, CMPRES1); |
1732 __ sw(T4, Address(T0, count_offset)); | 1735 __ sw(T4, Address(T0, count_offset)); |
1733 } | 1736 } |
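The sequence above is a saturating Smi increment: add one, and clamp to Smi::kMaxValue if the tagged addition overflowed. An equivalent sketch, using a compiler builtin in place of AddImmediateDetectOverflow (names illustrative):

  intptr_t count = entry[count_index];                 // Smi-tagged call count
  intptr_t bumped;
  if (__builtin_add_overflow(count, Smi::RawValue(1), &bumped)) {
    bumped = Smi::RawValue(Smi::kMaxValue);            // clamp on overflow
  }
  entry[count_index] = bumped;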
1734 | 1737 |
1735 // Load arguments descriptor into S4. | 1738 // Load arguments descriptor into S4. |
1736 __ lw(S4, FieldAddress(S5, ICData::arguments_descriptor_offset())); | 1739 __ lw(S4, FieldAddress(S5, ICData::arguments_descriptor_offset())); |
1737 | 1740 |
1738 // Get function and call it, if possible. | 1741 // Get function and call it, if possible. |
1739 __ lw(T0, Address(T0, target_offset)); | 1742 __ lw(T0, Address(T0, target_offset)); |
1740 __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); | 1743 __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); |
1741 __ lw(T4, FieldAddress(T0, Function::entry_point_offset())); | 1744 __ lw(T4, FieldAddress(T0, Function::entry_point_offset())); |
1742 __ jr(T4); | 1745 __ jr(T4); |
1743 | 1746 |
1744 // Call single step callback in debugger. | 1747 // Call single step callback in debugger. |
1745 if (FLAG_support_debugger) { | 1748 if (FLAG_support_debugger) { |
1746 __ Bind(&stepping); | 1749 __ Bind(&stepping); |
(...skipping 14 matching lines...) |
1761 | 1764 |
1762 void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) { | 1765 void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) { |
1763 GenerateUsageCounterIncrement(assembler, T0); | 1766 GenerateUsageCounterIncrement(assembler, T0); |
1764 GenerateNArgsCheckInlineCacheStub( | 1767 GenerateNArgsCheckInlineCacheStub( |
1765 assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL); | 1768 assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL); |
1766 } | 1769 } |
1767 | 1770 |
1768 | 1771 |
1769 void StubCode::GenerateTwoArgsUnoptimizedStaticCallStub(Assembler* assembler) { | 1772 void StubCode::GenerateTwoArgsUnoptimizedStaticCallStub(Assembler* assembler) { |
1770 GenerateUsageCounterIncrement(assembler, T0); | 1773 GenerateUsageCounterIncrement(assembler, T0); |
1771 GenerateNArgsCheckInlineCacheStub(assembler, 2, | 1774 GenerateNArgsCheckInlineCacheStub( |
1772 kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL); | 1775 assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL); |
1773 } | 1776 } |
1774 | 1777 |
1775 | 1778 |
1776 // Stub for compiling a function and jumping to the compiled code. | 1779 // Stub for compiling a function and jumping to the compiled code. |
1777 // S5: IC-Data (for methods). | 1780 // S5: IC-Data (for methods). |
1778 // S4: Arguments descriptor. | 1781 // S4: Arguments descriptor. |
1779 // T0: Function. | 1782 // T0: Function. |
1780 void StubCode::GenerateLazyCompileStub(Assembler* assembler) { | 1783 void StubCode::GenerateLazyCompileStub(Assembler* assembler) { |
1781 __ EnterStubFrame(); | 1784 __ EnterStubFrame(); |
1782 __ addiu(SP, SP, Immediate(-3 * kWordSize)); | 1785 __ addiu(SP, SP, Immediate(-3 * kWordSize)); |
(...skipping 81 matching lines...) |
1864 // Result in V0: null -> not found, otherwise result (true or false). | 1867 // Result in V0: null -> not found, otherwise result (true or false). |
1865 static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) { | 1868 static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) { |
1866 __ Comment("SubtypeNTestCacheStub"); | 1869 __ Comment("SubtypeNTestCacheStub"); |
1867 ASSERT((1 <= n) && (n <= 3)); | 1870 ASSERT((1 <= n) && (n <= 3)); |
1868 if (n > 1) { | 1871 if (n > 1) { |
1869 // Get instance type arguments. | 1872 // Get instance type arguments. |
1870 __ LoadClass(T0, A0); | 1873 __ LoadClass(T0, A0); |
1871 // Compute instance type arguments into T1. | 1874 // Compute instance type arguments into T1. |
1872 Label has_no_type_arguments; | 1875 Label has_no_type_arguments; |
1873 __ LoadObject(T1, Object::null_object()); | 1876 __ LoadObject(T1, Object::null_object()); |
1874 __ lw(T2, FieldAddress(T0, | 1877 __ lw(T2, FieldAddress( |
1875 Class::type_arguments_field_offset_in_words_offset())); | 1878 T0, Class::type_arguments_field_offset_in_words_offset())); |
1876 __ BranchEqual( | 1879 __ BranchEqual(T2, Immediate(Class::kNoTypeArguments), |
1877 T2, Immediate(Class::kNoTypeArguments), &has_no_type_arguments); | 1880 &has_no_type_arguments); |
1878 __ sll(T2, T2, 2); | 1881 __ sll(T2, T2, 2); |
1879 __ addu(T2, A0, T2); // T2 <- A0 + T2 * 4 | 1882 __ addu(T2, A0, T2); // T2 <- A0 + T2 * 4 |
1880 __ lw(T1, FieldAddress(T2, 0)); | 1883 __ lw(T1, FieldAddress(T2, 0)); |
1881 __ Bind(&has_no_type_arguments); | 1884 __ Bind(&has_no_type_arguments); |
1882 } | 1885 } |
1883 __ LoadClassId(T0, A0); | 1886 __ LoadClassId(T0, A0); |
1884 // A0: instance. | 1887 // A0: instance. |
1885 // A1: instantiator type arguments or NULL. | 1888 // A1: instantiator type arguments or NULL. |
1886 // A2: SubtypeTestCache. | 1889 // A2: SubtypeTestCache. |
1887 // T0: instance class id. | 1890 // T0: instance class id. |
(...skipping 21 matching lines...) Expand all Loading... |
1909 __ beq(T3, T0, &found); | 1912 __ beq(T3, T0, &found); |
1910 } else { | 1913 } else { |
1911 __ bne(T3, T0, &next_iteration); | 1914 __ bne(T3, T0, &next_iteration); |
1912 __ lw(T3, | 1915 __ lw(T3, |
1913 Address(T2, kWordSize * SubtypeTestCache::kInstanceTypeArguments)); | 1916 Address(T2, kWordSize * SubtypeTestCache::kInstanceTypeArguments)); |
1914 if (n == 2) { | 1917 if (n == 2) { |
1915 __ beq(T3, T1, &found); | 1918 __ beq(T3, T1, &found); |
1916 } else { | 1919 } else { |
1917 __ bne(T3, T1, &next_iteration); | 1920 __ bne(T3, T1, &next_iteration); |
1918 __ lw(T3, Address(T2, kWordSize * | 1921 __ lw(T3, Address(T2, kWordSize * |
1919 SubtypeTestCache::kInstantiatorTypeArguments)); | 1922 SubtypeTestCache::kInstantiatorTypeArguments)); |
1920 __ beq(T3, A1, &found); | 1923 __ beq(T3, A1, &found); |
1921 } | 1924 } |
1922 } | 1925 } |
1923 __ Bind(&next_iteration); | 1926 __ Bind(&next_iteration); |
1924 __ b(&loop); | 1927 __ b(&loop); |
1925 __ delay_slot()->addiu(T2, T2, | 1928 __ delay_slot()->addiu( |
1926 Immediate(kWordSize * SubtypeTestCache::kTestEntryLength)); | 1929 T2, T2, Immediate(kWordSize * SubtypeTestCache::kTestEntryLength)); |
1927 // Fall through to not found. | 1930 // Fall through to not found. |
1928 __ Bind(¬_found); | 1931 __ Bind(¬_found); |
1929 __ Ret(); | 1932 __ Ret(); |
1930 __ delay_slot()->mov(V0, T7); | 1933 __ delay_slot()->mov(V0, T7); |
1931 | 1934 |
1932 __ Bind(&found); | 1935 __ Bind(&found); |
1933 __ Ret(); | 1936 __ Ret(); |
1934 __ delay_slot()->lw(V0, | 1937 __ delay_slot()->lw(V0, |
1935 Address(T2, kWordSize * SubtypeTestCache::kTestResult)); | 1938 Address(T2, kWordSize * SubtypeTestCache::kTestResult)); |
1936 } | 1939 } |
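Each cache entry spans kTestEntryLength words: the instance class id (or function), the instance type arguments, the instantiator type arguments, and the cached result; a null in the first slot ends the array. A hedged sketch of the probe with illustrative names:

  RawObject* Probe(RawObject** entry, int n, RawObject* cid, RawObject* inst_ta,
                   RawObject* instantiator_ta, RawObject* null_obj) {
    for (;; entry += SubtypeTestCache::kTestEntryLength) {
      if (entry[0] == null_obj) return null_obj;  // end of cache: not found
      if (entry[0] != cid) continue;              // class id slot (assumed index 0)
      if (n >= 2 && entry[SubtypeTestCache::kInstanceTypeArguments] != inst_ta)
        continue;
      if (n >= 3 &&
          entry[SubtypeTestCache::kInstantiatorTypeArguments] != instantiator_ta)
        continue;
      return entry[SubtypeTestCache::kTestResult];  // cached true/false object
    }
  }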
(...skipping 48 matching lines...) |
1985 // A3: error object. | 1988 // A3: error object. |
1986 // SP + 4*kWordSize: address of stacktrace object. | 1989 // SP + 4*kWordSize: address of stacktrace object. |
1987 // SP + 5*kWordSize: address of thread. | 1990 // SP + 5*kWordSize: address of thread. |
1988 // Does not return. | 1991 // Does not return. |
1989 void StubCode::GenerateJumpToExceptionHandlerStub(Assembler* assembler) { | 1992 void StubCode::GenerateJumpToExceptionHandlerStub(Assembler* assembler) { |
1990 ASSERT(kExceptionObjectReg == V0); | 1993 ASSERT(kExceptionObjectReg == V0); |
1991 ASSERT(kStackTraceObjectReg == V1); | 1994 ASSERT(kStackTraceObjectReg == V1); |
1992 __ mov(V0, A3); // Exception object. | 1995 __ mov(V0, A3); // Exception object. |
1993 // MIPS ABI reserves stack space for all arguments. The StackTrace object is | 1996 // MIPS ABI reserves stack space for all arguments. The StackTrace object is |
1994 // the last of five arguments, so it is first pushed on the stack. | 1997 // the last of five arguments, so it is first pushed on the stack. |
1995 __ lw(V1, Address(SP, 4 * kWordSize)); // StackTrace object. | 1998 __ lw(V1, Address(SP, 4 * kWordSize)); // StackTrace object. |
1996 __ mov(FP, A2); // Frame pointer. | 1999 __ mov(FP, A2); // Frame pointer. |
1997 __ lw(THR, Address(SP, 5 * kWordSize)); // Thread. | 2000 __ lw(THR, Address(SP, 5 * kWordSize)); // Thread. |
1998 // Set tag. | 2001 // Set tag. |
1999 __ LoadImmediate(A2, VMTag::kDartTagId); | 2002 __ LoadImmediate(A2, VMTag::kDartTagId); |
2000 __ sw(A2, Assembler::VMTagAddress()); | 2003 __ sw(A2, Assembler::VMTagAddress()); |
2001 // Clear top exit frame. | 2004 // Clear top exit frame. |
2002 __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset())); | 2005 __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset())); |
2003 // Restore pool pointer. | 2006 // Restore pool pointer. |
2004 __ RestoreCodePointer(); | 2007 __ RestoreCodePointer(); |
2005 __ LoadPoolPointer(); | 2008 __ LoadPoolPointer(); |
2006 __ jr(A0); // Jump to the exception handler code. | 2009 __ jr(A0); // Jump to the exception handler code. |
2007 __ delay_slot()->mov(SP, A1); // Stack pointer. | 2010 __ delay_slot()->mov(SP, A1); // Stack pointer. |
2008 } | 2011 } |
2009 | 2012 |
2010 | 2013 |
2011 // Calls to the runtime to optimize the given function. | 2014 // Calls to the runtime to optimize the given function. |
2012 // T0: function to be reoptimized. | 2015 // T0: function to be reoptimized. |
2013 // S4: argument descriptor (preserved). | 2016 // S4: argument descriptor (preserved). |
2014 void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) { | 2017 void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) { |
2015 __ Comment("OptimizeFunctionStub"); | 2018 __ Comment("OptimizeFunctionStub"); |
2016 __ EnterStubFrame(); | 2019 __ EnterStubFrame(); |
2017 __ addiu(SP, SP, Immediate(-3 * kWordSize)); | 2020 __ addiu(SP, SP, Immediate(-3 * kWordSize)); |
2018 __ sw(S4, Address(SP, 2 * kWordSize)); | 2021 __ sw(S4, Address(SP, 2 * kWordSize)); |
2019 // Set up space on stack for return value. | 2022 // Set up space on stack for return value. |
2020 __ sw(ZR, Address(SP, 1 * kWordSize)); | 2023 __ sw(ZR, Address(SP, 1 * kWordSize)); |
2021 __ sw(T0, Address(SP, 0 * kWordSize)); | 2024 __ sw(T0, Address(SP, 0 * kWordSize)); |
2022 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1); | 2025 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1); |
2023 __ Comment("OptimizeFunctionStub return"); | 2026 __ Comment("OptimizeFunctionStub return"); |
2024 __ lw(T0, Address(SP, 1 * kWordSize)); // Get Function object | 2027 __ lw(T0, Address(SP, 1 * kWordSize)); // Get Function object |
2025 __ lw(S4, Address(SP, 2 * kWordSize)); // Restore argument descriptor. | 2028 __ lw(S4, Address(SP, 2 * kWordSize)); // Restore argument descriptor. |
2026 __ addiu(SP, SP, Immediate(3 * kWordSize)); // Discard arguments. | 2029 __ addiu(SP, SP, Immediate(3 * kWordSize)); // Discard arguments. |
2027 | 2030 |
2028 __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); | 2031 __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); |
2029 __ lw(T1, FieldAddress(T0, Function::entry_point_offset())); | 2032 __ lw(T1, FieldAddress(T0, Function::entry_point_offset())); |
2030 __ LeaveStubFrameAndReturn(T1); | 2033 __ LeaveStubFrameAndReturn(T1); |
2031 __ break_(0); | 2034 __ break_(0); |
2032 } | 2035 } |
2033 | 2036 |
2034 | 2037 |
2035 // Does identical check (object references are equal or not equal) with special | 2038 // Does identical check (object references are equal or not equal) with special |
(...skipping 342 matching lines...) |
2378 } | 2381 } |
2379 | 2382 |
2380 | 2383 |
2381 void StubCode::GenerateFrameAwaitingMaterializationStub(Assembler* assembler) { | 2384 void StubCode::GenerateFrameAwaitingMaterializationStub(Assembler* assembler) { |
2382 __ break_(0); | 2385 __ break_(0); |
2383 } | 2386 } |
2384 | 2387 |
2385 } // namespace dart | 2388 } // namespace dart |
2386 | 2389 |
2387 #endif // defined TARGET_ARCH_MIPS | 2390 #endif // defined TARGET_ARCH_MIPS |