
Diff: runtime/vm/stub_code_arm.cc (shown unified: '-' lines are before, '+' lines are after clang-format)

Issue 2481873005: clang-format runtime/vm (Closed)
Patch Set: Merge (created 4 years, 1 month ago)
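Every hunk in this file is a pure reformatting from running clang-format over runtime/vm; no behavior changes. Two patterns account for most of the hunks below, sketched here with hypothetical names (GenerateSomeStub and kSomeRuntimeEntry are illustrative, not lines from the patch): a statement is never left sharing a line with its opening brace, and long argument lists are re-wrapped, either bin-packed up to the 80-column limit or broken once after the opening parenthesis.

// Before: statement on the '{' line, arguments hand-wrapped one per line.
{ Label ok;
  GenerateSomeStub(assembler,
                   2,
                   kSomeRuntimeEntry,
                   Token::kILLEGAL);
}

// After clang-format: brace contents start on a fresh line, and the
// arguments are packed onto as few lines as fit in 80 columns.
{
  Label ok;
  GenerateSomeStub(assembler, 2, kSomeRuntimeEntry, Token::kILLEGAL);
}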
// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"
#if defined(TARGET_ARCH_ARM)

#include "vm/assembler.h"
#include "vm/code_generator.h"
#include "vm/cpu.h"
#include "vm/compiler.h"
#include "vm/dart_entry.h"
#include "vm/flow_graph_compiler.h"
#include "vm/heap.h"
#include "vm/instructions.h"
#include "vm/object_store.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"
#include "vm/tags.h"

#define __ assembler->

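A note for readers new to the VM: because of the `__` macro above, each `__ op(...)` line in the stubs below is an ordinary C++ method call on the Assembler, which emits the corresponding ARM instruction into the stub's code buffer. For example, a line taken from this file:

__ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset());

preprocesses to:

assembler->StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset());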
namespace dart {

DEFINE_FLAG(bool, inline_alloc, true, "Inline allocation of objects.");
-DEFINE_FLAG(bool, use_slow_path, false,
-            "Set to true for debugging & verifying the slow paths.");
+DEFINE_FLAG(bool,
+            use_slow_path,
+            false,
+            "Set to true for debugging & verifying the slow paths.");
DECLARE_FLAG(bool, trace_optimized_ic_calls);

// Input parameters:
//   LR : return address.
//   SP : address of last argument in argument array.
//   SP + 4*R4 - 4 : address of first argument in argument array.
//   SP + 4*R4 : address of return value.
//   R9 : address of the runtime function to call.
//   R4 : number of arguments to the call.
void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {
  const intptr_t thread_offset = NativeArguments::thread_offset();
  const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
  const intptr_t argv_offset = NativeArguments::argv_offset();
  const intptr_t retval_offset = NativeArguments::retval_offset();

  __ EnterStubFrame();

  // Save exit frame information to enable stack walking as we are about
  // to transition to Dart VM C++ code.
  __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset());

#if defined(DEBUG)
-  { Label ok;
+  {
+    Label ok;
    // Check that we are always entering from Dart code.
    __ LoadFromOffset(kWord, R8, THR, Thread::vm_tag_offset());
    __ CompareImmediate(R8, VMTag::kDartTagId);
    __ b(&ok, EQ);
    __ Stop("Not coming from Dart code.");
    __ Bind(&ok);
  }
#endif

  // Mark that the thread is executing VM code.
(... skipping 71 matching lines ...)
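The four offsets loaded at the top of GenerateCallToRuntimeStub describe the NativeArguments block that the stub assembles on the stack before calling into C++. A sketch of the implied layout, inferred from the ASSERT(argv_offset == 2 * kWordSize) and ASSERT(retval_offset == 3 * kWordSize) checks visible later in this file (field names and the first two offsets are assumptions; the real definition lives in runtime/vm/native_arguments.h):

struct NativeArgumentsSketch {
  Thread* thread;      // thread_offset():   word 0 (assumed)
  intptr_t argc_tag;   // argc_tag_offset(): word 1 (assumed)
  RawObject** argv;    // argv_offset():     word 2 (asserted below)
  RawObject** retval;  // retval_offset():   word 3 (asserted below)
};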
  const intptr_t argv_offset = NativeArguments::argv_offset();
  const intptr_t retval_offset = NativeArguments::retval_offset();

  __ EnterStubFrame();

  // Save exit frame information to enable stack walking as we are about
  // to transition to native code.
  __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset());

#if defined(DEBUG)
-  { Label ok;
+  {
+    Label ok;
    // Check that we are always entering from Dart code.
    __ LoadFromOffset(kWord, R8, THR, Thread::vm_tag_offset());
    __ CompareImmediate(R8, VMTag::kDartTagId);
    __ b(&ok, EQ);
    __ Stop("Not coming from Dart code.");
    __ Bind(&ok);
  }
#endif

  // Mark that the thread is executing native code.
(... skipping 19 matching lines ...)
  ASSERT(argv_offset == 2 * kWordSize);
  // Set argv in NativeArguments: R2 already contains argv.

  ASSERT(retval_offset == 3 * kWordSize);
  // Set retval in NativeArgs.
  __ add(R3, FP, Operand(kCallerSpSlotFromFp * kWordSize));

  // Passing the structure by value as in runtime calls would require changing
  // Dart API for native functions.
  // For now, space is reserved on the stack and we pass a pointer to it.
  __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3));
  __ mov(R0, Operand(SP));  // Pass the pointer to the NativeArguments.

  __ mov(R1, Operand(R9));  // Pass the function entrypoint to call.

  // Call native function invocation wrapper or redirection via simulator.
  __ ldr(LR, Address(THR, Thread::native_call_wrapper_entry_point_offset()));
  __ blx(LR);

  // Mark that the thread is executing Dart code.
  __ LoadImmediate(R2, VMTag::kDartTagId);
(... skipping 20 matching lines ...)
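About the stm line above: ARM's store-multiple instruction encodes its register list as a 16-bit mask with one bit per register, which is why the operand is built from shifted register numbers; IA (increment after) stores to ascending addresses starting at the base register. So, assuming R0..R3 hold the four NativeArguments fields:

// (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3) encodes the list {R0-R3}.
// stm IA, SP, {R0-R3} then writes R0 to [SP], R1 to [SP+4], R2 to [SP+8]
// and R3 to [SP+12], filling the NativeArguments slots in order.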
  const intptr_t argv_offset = NativeArguments::argv_offset();
  const intptr_t retval_offset = NativeArguments::retval_offset();

  __ EnterStubFrame();

  // Save exit frame information to enable stack walking as we are about
  // to transition to native code.
  __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset());

#if defined(DEBUG)
-  { Label ok;
+  {
+    Label ok;
    // Check that we are always entering from Dart code.
    __ LoadFromOffset(kWord, R8, THR, Thread::vm_tag_offset());
    __ CompareImmediate(R8, VMTag::kDartTagId);
    __ b(&ok, EQ);
    __ Stop("Not coming from Dart code.");
    __ Bind(&ok);
  }
#endif

  // Mark that the thread is executing native code.
(... skipping 19 matching lines ...)
  ASSERT(argv_offset == 2 * kWordSize);
  // Set argv in NativeArguments: R2 already contains argv.

  ASSERT(retval_offset == 3 * kWordSize);
  // Set retval in NativeArgs.
  __ add(R3, FP, Operand(kCallerSpSlotFromFp * kWordSize));

  // Passing the structure by value as in runtime calls would require changing
  // Dart API for native functions.
  // For now, space is reserved on the stack and we pass a pointer to it.
  __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3));
  __ mov(R0, Operand(SP));  // Pass the pointer to the NativeArguments.

  // Call native function or redirection via simulator.
  __ blx(R9);

  // Mark that the thread is executing Dart code.
  __ LoadImmediate(R2, VMTag::kDartTagId);
  __ StoreToOffset(kWord, R2, THR, Thread::vm_tag_offset());

  // Reset exit frame information in Isolate structure.
(... skipping 182 matching lines ...)
      __ vstmd(DB_W, SP, D16, kNumberOfDRegisters - 16);
      __ vstmd(DB_W, SP, D0, 16);
    } else {
      __ vstmd(DB_W, SP, D0, kNumberOfDRegisters);
    }
  } else {
    __ AddImmediate(SP, SP, -kNumberOfFpuRegisters * kFpuRegisterSize);
  }

  __ mov(R0, Operand(SP));  // Pass address of saved registers block.
-  bool is_lazy = (kind == kLazyDeoptFromReturn) ||
-                 (kind == kLazyDeoptFromThrow);
+  bool is_lazy =
+      (kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
  __ mov(R1, Operand(is_lazy ? 1 : 0));
  __ ReserveAlignedFrameSpace(0);
  __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
  // Result (R0) is stack-size (FP - SP) in bytes.

  if (kind == kLazyDeoptFromReturn) {
    // Restore result into R1 temporarily.
    __ ldr(R1, Address(FP, saved_result_slot_from_fp * kWordSize));
  } else if (kind == kLazyDeoptFromThrow) {
    // Restore result into R1 temporarily.
(... skipping 160 matching lines ...)
// Input parameters:
//   LR: return address.
//   R1: array element type (either NULL or an instantiated type).
//   R2: array length as Smi (must be preserved).
// The newly allocated object is returned in R0.
void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
  Label slow_case;
  // Compute the size to be allocated, it is based on the array length
  // and is computed as:
  // RoundedAllocationSize((array_length * kwordSize) + sizeof(RawArray)).
  __ MoveRegister(R3, R2);  // Array length.
  // Check that length is a positive Smi.
  __ tst(R3, Operand(kSmiTagMask));
  if (FLAG_use_slow_path) {
    __ b(&slow_case);
  } else {
    __ b(&slow_case, NE);
  }
  __ cmp(R3, Operand(0));
  __ b(&slow_case, LT);

(... skipping 11 matching lines ...)
  __ add(R9, R9, Operand(R3, LSL, 1));  // R3 is a Smi.
  ASSERT(kSmiTagShift == 1);
  __ bic(R9, R9, Operand(kObjectAlignment - 1));

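Why the add above scales the length by the word size: R3 holds the array length as a Smi, i.e. already shifted left by kSmiTagShift (asserted to be 1), so shifting it left once more multiplies the untagged length by 4, which is kWordSize on ARM32. Worked through with an illustrative value:

// length           = 10
// R3 (tagged Smi)  = 10 << 1 = 20
// R3, LSL 1        = 40 = 10 * kWordSize   (kWordSize == 4 on ARM32)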
  // R9: Allocation size.
  Heap::Space space = Heap::kNew;
  __ ldr(R8, Address(THR, Thread::heap_offset()));
  // Potential new object start.
  __ ldr(R0, Address(R8, Heap::TopOffset(space)));
  __ adds(NOTFP, R0, Operand(R9));  // Potential next object start.
  __ b(&slow_case, CS);             // Branch if unsigned overflow.

  // Check if the allocation fits into the remaining space.
  // R0: potential new object start.
  // NOTFP: potential next object start.
  // R9: allocation size.
  __ ldr(R3, Address(R8, Heap::EndOffset(space)));
  __ cmp(NOTFP, Operand(R3));
  __ b(&slow_case, CS);

  // Successfully allocated the object(s), now update top to point to
(... skipping 17 matching lines ...)
    // Get the class index and insert it into the tags.
    // R8: size and bit tags.
    __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid));
    __ orr(R8, R8, Operand(TMP));
    __ str(R8, FieldAddress(R0, Array::tags_offset()));  // Store tags.
  }

  // R0: new object start as a tagged pointer.
  // NOTFP: new object end address.
  // Store the type argument field.
-  __ StoreIntoObjectNoBarrier(R0,
-                              FieldAddress(R0, Array::type_arguments_offset()),
-                              R1);
+  __ StoreIntoObjectNoBarrier(
+      R0, FieldAddress(R0, Array::type_arguments_offset()), R1);

  // Set the length field.
-  __ StoreIntoObjectNoBarrier(R0,
-                              FieldAddress(R0, Array::length_offset()),
-                              R2);
+  __ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, Array::length_offset()), R2);

  // Initialize all array elements to raw_null.
  // R0: new object start as a tagged pointer.
  // R3: allocation stats address.
  // R8, R9: null
  // R4: iterator which initially points to the start of the variable
  //     data area to be initialized.
  // NOTFP: new object end address.
  // R9: allocation size.
  NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R3, R9, space));
(... skipping 278 matching lines ...)
  // R0: Address being stored
  __ ldr(R2, FieldAddress(R0, Object::tags_offset()));
  __ tst(R2, Operand(1 << RawObject::kRememberedBit));
  __ b(&add_to_buffer, EQ);
  __ PopList((1 << R1) | (1 << R2) | (1 << R3));
  __ Ret();

  __ Bind(&add_to_buffer);
  // R2: Header word.
  if (TargetCPUFeatures::arm_version() == ARMv5TE) {
    // TODO(21263): Implement 'swp' and use it below.
#if !defined(USING_SIMULATOR)
    ASSERT(OS::NumberOfAvailableProcessors() <= 1);
#endif
    __ orr(R2, R2, Operand(1 << RawObject::kRememberedBit));
    __ str(R2, FieldAddress(R0, Object::tags_offset()));
  } else {
    // Atomically set the remembered bit of the object header.
    ASSERT(Object::tags_offset() == 0);
    __ sub(R3, R0, Operand(kHeapObjectTag));
    // R3: Untagged address of header word (ldrex/strex do not support offsets).
(... skipping 105 matching lines ...)
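For the non-ARMv5TE branch above, the elided lines use ldrex/strex, ARM's load-exclusive/store-exclusive pair, to set the bit without losing concurrent updates to the other header bits. The same operation expressed with a C++ atomic (an illustrative equivalent, not the elided code):

#include <atomic>
#include <cstdint>

// Atomically OR the remembered bit into an object header word.
void SetRememberedBit(std::atomic<uintptr_t>* header_word, int bit) {
  header_word->fetch_or(uintptr_t{1} << bit, std::memory_order_relaxed);
}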
  // R9: allocation stats table.
  // First try inlining the initialization without a loop.
  if (instance_size < (kInlineInstanceSize * kWordSize)) {
    // Small objects are initialized using a consecutive set of writes.
    intptr_t begin_offset = Instance::NextFieldOffset() - kHeapObjectTag;
    intptr_t end_offset = instance_size - kHeapObjectTag;
    // Save one move if less than two fields.
    if ((end_offset - begin_offset) >= (2 * kWordSize)) {
      __ mov(R3, Operand(R2));
    }
-    __ InitializeFieldsNoBarrierUnrolled(R0, R0, begin_offset, end_offset,
-                                         R2, R3);
+    __ InitializeFieldsNoBarrierUnrolled(R0, R0, begin_offset, end_offset, R2,
+                                         R3);
  } else {
    // There are more than kInlineInstanceSize(12) fields
    __ add(R4, R0, Operand(Instance::NextFieldOffset() - kHeapObjectTag));
    __ mov(R3, Operand(R2));
    // Loop until the whole object is initialized.
    // R2: raw null.
    // R3: raw null.
    // R0: new object (tagged).
    // R1: next object start.
    // R4: next word to be initialized.
(... skipping 22 matching lines ...)
  if (is_cls_parameterized) {
    // Load the type arguments.
    __ ldr(R4, Address(SP, 0));
  }
  // If is_cls_parameterized:
  // R4: new object type arguments.
  // Create a stub frame as we are pushing some objects on the stack before
  // calling into the runtime.
  __ EnterStubFrame();  // Uses pool pointer to pass cls to runtime.
  __ LoadObject(R2, Object::null_object());
  __ Push(R2);  // Setup space on stack for return value.
  __ PushObject(cls);  // Push class of object to be allocated.
  if (is_cls_parameterized) {
    // Push type arguments.
    __ Push(R4);
  } else {
    // Push null type arguments.
    __ Push(R2);
  }
  __ CallRuntime(kAllocateObjectRuntimeEntry, 2);  // Allocate object.
  __ Drop(2);  // Pop arguments.
  __ Pop(R0);  // Pop result (newly allocated object).
  // R0: new object
  // Restore the frame pointer.
  __ LeaveStubFrame();
  __ Ret();
}


// Called for invoking "dynamic noSuchMethod(Invocation invocation)" function
// from the entry code of a dart function after an error in passed argument
(... skipping 29 matching lines ...)
// R8: function object.
// R9: inline cache data object.
// Cannot use function object from ICData as it may be the inlined
// function and not the top-scope function.
void StubCode::GenerateOptimizedUsageCounterIncrement(Assembler* assembler) {
  Register ic_reg = R9;
  Register func_reg = R8;
  if (FLAG_trace_optimized_ic_calls) {
    __ EnterStubFrame();
    __ PushList((1 << R9) | (1 << R8));  // Preserve.
    __ Push(ic_reg);  // Argument.
    __ Push(func_reg);  // Argument.
    __ CallRuntime(kTraceICCallRuntimeEntry, 2);
    __ Drop(2);  // Discard argument;
    __ PopList((1 << R9) | (1 << R8));  // Restore.
    __ LeaveStubFrame();
  }
  __ ldr(NOTFP, FieldAddress(func_reg, Function::usage_counter_offset()));
  __ add(NOTFP, NOTFP, Operand(1));
  __ str(NOTFP, FieldAddress(func_reg, Function::usage_counter_offset()));
}


// Loads function into 'temp_reg'.
(... skipping 21 matching lines ...)
                          intptr_t num_args,
                          Label* not_smi_or_overflow) {
  __ Comment("Fast Smi op");
  __ ldr(R0, Address(SP, 0 * kWordSize));
  __ ldr(R1, Address(SP, 1 * kWordSize));
  __ orr(TMP, R0, Operand(R1));
  __ tst(TMP, Operand(kSmiTagMask));
  __ b(not_smi_or_overflow, NE);
  switch (kind) {
    case Token::kADD: {
      __ adds(R0, R1, Operand(R0));   // Adds.
      __ b(not_smi_or_overflow, VS);  // Branch if overflow.
      break;
    }
    case Token::kSUB: {
      __ subs(R0, R1, Operand(R0));   // Subtract.
      __ b(not_smi_or_overflow, VS);  // Branch if overflow.
      break;
    }
    case Token::kEQ: {
      __ cmp(R0, Operand(R1));
      __ LoadObject(R0, Bool::True(), EQ);
      __ LoadObject(R0, Bool::False(), NE);
      break;
    }
-    default: UNIMPLEMENTED();
+    default:
+      UNIMPLEMENTED();
  }
  // R9: IC data object (preserved).
  __ ldr(R8, FieldAddress(R9, ICData::ic_data_offset()));
  // R8: ic_data_array with check entries: classes and target functions.
  __ AddImmediate(R8, R8, Array::data_offset() - kHeapObjectTag);
  // R8: points directly to the first ic data array element.
#if defined(DEBUG)
  // Check that first entry is for Smi/Smi.
  Label error, ok;
  const intptr_t imm_smi_cid = reinterpret_cast<intptr_t>(Smi::New(kSmiCid));
  __ ldr(R1, Address(R8, 0));
  __ CompareImmediate(R1, imm_smi_cid);
  __ b(&error, NE);
  __ ldr(R1, Address(R8, kWordSize));
  __ CompareImmediate(R1, imm_smi_cid);
  __ b(&ok, EQ);
(... skipping 25 matching lines ...)
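The fast Smi path above can operate on the tagged values directly because a Smi's tag bit is 0 and the tag shift distributes over addition and subtraction: (a << 1) + (b << 1) == (a + b) << 1, so the result of adds/subs is itself a correctly tagged Smi, and the VS branch catches results outside the Smi range. A worked example:

// a = 3 -> tagged 6;  b = 4 -> tagged 8
// adds: 6 + 8 = 14 == (3 + 4) << 1, a valid tagged Smi with no overflow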
//   - Match not found -> jump to IC miss.
void StubCode::GenerateNArgsCheckInlineCacheStub(
    Assembler* assembler,
    intptr_t num_args,
    const RuntimeEntry& handle_ic_miss,
    Token::Kind kind,
    bool optimized) {
  __ CheckCodePointer();
  ASSERT(num_args > 0);
#if defined(DEBUG)
-  { Label ok;
+  {
+    Label ok;
    // Check that the IC data array has NumArgsTested() == num_args.
    // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
    __ ldr(R8, FieldAddress(R9, ICData::state_bits_offset()));
    ASSERT(ICData::NumArgsTestedShift() == 0);  // No shift needed.
    __ and_(R8, R8, Operand(ICData::NumArgsTestedMask()));
    __ CompareImmediate(R8, num_args);
    __ b(&ok, EQ);
    __ Stop("Incorrect stub for IC data");
    __ Bind(&ok);
  }
(... skipping 59 matching lines ...)
  }
  __ Bind(&update);
  // Reload receiver class ID. It has not been destroyed when num_args == 1.
  if (num_args > 1) {
    __ ldr(R0, Address(SP, NOTFP, LSL, 1));
    __ LoadTaggedClassIdMayBeSmi(R0, R0);
  }

  const intptr_t entry_size = ICData::TestEntryLengthFor(num_args) * kWordSize;
  __ AddImmediate(R8, entry_size);  // Next entry.
  __ ldr(R1, Address(R8, 0));       // Next class ID.

  __ Bind(&test);
  __ CompareImmediate(R1, Smi::RawValue(kIllegalCid));  // Done?
  __ b(&loop, NE);

  __ Comment("IC miss");
  // Compute address of arguments.
  // NOTFP: argument_count - 1 (smi).
  __ add(NOTFP, SP, Operand(NOTFP, LSL, 1));  // NOTFP is Smi.
  // NOTFP: address of receiver.
(... skipping 65 matching lines ...)
//  LR: return address.
//  R9: inline cache data object.
// Inline cache data object structure:
// 0: function-name
// 1: N, number of arguments checked.
// 2 .. (length - 1): group of checks, each check containing:
//   - N classes.
//   - 1 target function.
void StubCode::GenerateOneArgCheckInlineCacheStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, R8);
-  GenerateNArgsCheckInlineCacheStub(assembler,
-                                    1,
-                                    kInlineCacheMissHandlerOneArgRuntimeEntry,
-                                    Token::kILLEGAL);
+  GenerateNArgsCheckInlineCacheStub(
+      assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL);
}
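The probe loop elided from the stub above walks the check groups in the layout documented before GenerateOneArgCheckInlineCacheStub. A rough C++ rendering, assuming each check is the N class ids followed by the target as that comment describes (the helper's shape and its illegal_cid parameter are hypothetical; cids are stored as tagged Smis and a kIllegalCid sentinel ends the array, per the stub's Done? check):

#include <cstdint>

intptr_t ProbeICData(const intptr_t* entries, intptr_t num_args,
                     const intptr_t* receiver_cids, intptr_t illegal_cid) {
  const intptr_t check_len = num_args + 1;  // N class ids + target (assumed)
  for (intptr_t i = 0; entries[i] != illegal_cid; i += check_len) {
    bool match = true;
    for (intptr_t j = 0; j < num_args; j++) {
      match = match && (entries[i + j] == receiver_cids[j]);
    }
    if (match) return entries[i + num_args];  // the target function
  }
  return 0;  // miss: the stub falls through to the handle_ic_miss entry
}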


void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, R8);
-  GenerateNArgsCheckInlineCacheStub(assembler,
-                                    2,
-                                    kInlineCacheMissHandlerTwoArgsRuntimeEntry,
-                                    Token::kILLEGAL);
+  GenerateNArgsCheckInlineCacheStub(assembler, 2,
+                                    kInlineCacheMissHandlerTwoArgsRuntimeEntry,
+                                    Token::kILLEGAL);
}


void StubCode::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, R8);
-  GenerateNArgsCheckInlineCacheStub(assembler,
-                                    2,
-                                    kInlineCacheMissHandlerTwoArgsRuntimeEntry,
-                                    Token::kADD);
+  GenerateNArgsCheckInlineCacheStub(
+      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD);
}


void StubCode::GenerateSmiSubInlineCacheStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, R8);
-  GenerateNArgsCheckInlineCacheStub(assembler, 2,
-      kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kSUB);
+  GenerateNArgsCheckInlineCacheStub(
+      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kSUB);
}


void StubCode::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, R8);
-  GenerateNArgsCheckInlineCacheStub(assembler, 2,
-      kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ);
+  GenerateNArgsCheckInlineCacheStub(
+      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ);
}


void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub(
    Assembler* assembler) {
  GenerateOptimizedUsageCounterIncrement(assembler);
-  GenerateNArgsCheckInlineCacheStub(assembler, 1,
-      kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
-      true /* optimized */);
+  GenerateNArgsCheckInlineCacheStub(assembler, 1,
+                                    kInlineCacheMissHandlerOneArgRuntimeEntry,
+                                    Token::kILLEGAL, true /* optimized */);
}


void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub(
    Assembler* assembler) {
  GenerateOptimizedUsageCounterIncrement(assembler);
-  GenerateNArgsCheckInlineCacheStub(assembler, 2,
-      kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
-      true /* optimized */);
+  GenerateNArgsCheckInlineCacheStub(assembler, 2,
+                                    kInlineCacheMissHandlerTwoArgsRuntimeEntry,
+                                    Token::kILLEGAL, true /* optimized */);
}


// Intermediary stub between a static call and its target. ICData contains
// the target function and the call count.
// R9: ICData
void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, R8);
#if defined(DEBUG)
-  { Label ok;
+  {
+    Label ok;
    // Check that the IC data array has NumArgsTested() == 0.
    // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
    __ ldr(R8, FieldAddress(R9, ICData::state_bits_offset()));
    ASSERT(ICData::NumArgsTestedShift() == 0);  // No shift needed.
    __ and_(R8, R8, Operand(ICData::NumArgsTestedMask()));
    __ CompareImmediate(R8, 0);
    __ b(&ok, EQ);
    __ Stop("Incorrect IC data for unoptimized static call");
    __ Bind(&ok);
  }
(... skipping 49 matching lines ...)

void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, R8);
  GenerateNArgsCheckInlineCacheStub(
      assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL);
}


void StubCode::GenerateTwoArgsUnoptimizedStaticCallStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, R8);
-  GenerateNArgsCheckInlineCacheStub(assembler, 2,
-      kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL);
+  GenerateNArgsCheckInlineCacheStub(
+      assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL);
}


// Stub for compiling a function and jumping to the compiled code.
// R9: IC-Data (for methods).
// R4: Arguments descriptor.
// R0: Function.
void StubCode::GenerateLazyCompileStub(Assembler* assembler) {
  // Preserve arg desc. and IC data object.
  __ EnterStubFrame();
  __ PushList((1 << R4) | (1 << R9));
  __ Push(R0);  // Pass function.
  __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
  __ Pop(R0);  // Restore argument.
  __ PopList((1 << R4) | (1 << R9));  // Restore arg desc. and IC data.
  __ LeaveStubFrame();

  __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
  __ ldr(R2, FieldAddress(R0, Function::entry_point_offset()));
  __ bx(R2);
}


// R9: Contains an ICData.
(... skipping 18 matching lines ...)
  __ PushList((1 << R0));
  __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
  __ PopList((1 << CODE_REG));
  __ LeaveStubFrame();
  __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
  __ bx(R0);
}


// Called only from unoptimized code. All relevant registers have been saved.
-void StubCode::GenerateDebugStepCheckStub(
-    Assembler* assembler) {
+void StubCode::GenerateDebugStepCheckStub(Assembler* assembler) {
  // Check single stepping.
  Label stepping, done_stepping;
  __ LoadIsolate(R1);
  __ ldrb(R1, Address(R1, Isolate::single_step_offset()));
  __ CompareImmediate(R1, 0);
  __ b(&stepping, NE);
  __ Bind(&done_stepping);
  __ Ret();

  __ Bind(&stepping);
(... skipping 11 matching lines ...)
// R2: cache array.
// Result in R1: null -> not found, otherwise result (true or false).
static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
  ASSERT((1 <= n) && (n <= 3));
  if (n > 1) {
    // Get instance type arguments.
    __ LoadClass(R3, R0, R4);
    // Compute instance type arguments into R4.
    Label has_no_type_arguments;
    __ LoadObject(R4, Object::null_object());
-    __ ldr(R9, FieldAddress(R3,
-               Class::type_arguments_field_offset_in_words_offset()));
+    __ ldr(R9, FieldAddress(
+                   R3, Class::type_arguments_field_offset_in_words_offset()));
    __ CompareImmediate(R9, Class::kNoTypeArguments);
    __ b(&has_no_type_arguments, EQ);
    __ add(R9, R0, Operand(R9, LSL, 2));
    __ ldr(R4, FieldAddress(R9, 0));
    __ Bind(&has_no_type_arguments);
  }
  __ LoadClassId(R3, R0);
  // R0: instance.
  // R1: instantiator type arguments or NULL.
  // R2: SubtypeTestCache.
(... skipping 21 matching lines ...)
  } else {
    __ b(&next_iteration, NE);
    __ ldr(R9,
           Address(R2, kWordSize * SubtypeTestCache::kInstanceTypeArguments));
    __ cmp(R9, Operand(R4));
    if (n == 2) {
      __ b(&found, EQ);
    } else {
      __ b(&next_iteration, NE);
      __ ldr(R9, Address(R2, kWordSize *
                                 SubtypeTestCache::kInstantiatorTypeArguments));
      __ cmp(R9, Operand(R1));
      __ b(&found, EQ);
    }
  }
  __ Bind(&next_iteration);
  __ AddImmediate(R2, kWordSize * SubtypeTestCache::kTestEntryLength);
  __ b(&loop);
  // Fall through to not found.
  __ Bind(&not_found);
  __ LoadObject(R1, Object::null_object());
(... skipping 50 matching lines ...)
// R0: program_counter.
// R1: stack_pointer.
// R2: frame_pointer.
// R3: error object.
// SP + 0: address of stacktrace object.
// SP + 4: thread.
// Does not return.
void StubCode::GenerateJumpToExceptionHandlerStub(Assembler* assembler) {
  ASSERT(kExceptionObjectReg == R0);
  ASSERT(kStackTraceObjectReg == R1);
  __ mov(IP, Operand(R1));  // Copy Stack pointer into IP.
  __ mov(LR, Operand(R0));  // Program counter.
  __ mov(R0, Operand(R3));  // Exception object.
  __ ldr(R1, Address(SP, 0));  // StackTrace object.
  __ ldr(THR, Address(SP, 4));  // Thread.
  __ mov(FP, Operand(R2));  // Frame_pointer.
  __ mov(SP, Operand(IP));  // Set Stack pointer.
  // Set the tag.
  __ LoadImmediate(R2, VMTag::kDartTagId);
  __ StoreToOffset(kWord, R2, THR, Thread::vm_tag_offset());
  // Clear top exit frame.
  __ LoadImmediate(R2, 0);
  __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset());
  // Restore the pool pointer.
  __ RestoreCodePointer();
  __ LoadPoolPointer();
  __ bx(LR);  // Jump to the exception handler code.
(... skipping 68 matching lines ...)
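Tying the register comments above to the ARM calling convention: under AAPCS the first four arguments arrive in R0-R3, so the fifth and sixth spill to the stack, which is why the stub loads the stacktrace and thread from SP + 0 and SP + 4. Viewed as a C-style signature (a sketch; the stub is entered by a jump rather than a normal call):

// void JumpToExceptionHandler(uword program_counter,   // R0
//                             uword stack_pointer,     // R1
//                             uword frame_pointer,     // R2
//                             RawObject* exception,    // R3
//                             RawObject* stacktrace,   // [SP + 0]
//                             Thread* thread);         // [SP + 4]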
  __ cmp(temp, Operand(IP));
  __ b(&done);

  __ Bind(&check_bigint);
  __ CompareClassId(left, kBigintCid, temp);
  __ b(&reference_compare, NE);
  __ CompareClassId(right, kBigintCid, temp);
  __ b(&done, NE);
  __ EnterStubFrame();
  __ ReserveAlignedFrameSpace(2 * kWordSize);
  __ stm(IA, SP, (1 << R0) | (1 << R1));
  __ CallRuntime(kBigintCompareRuntimeEntry, 2);
  // Result in R0, 0 means equal.
  __ LeaveStubFrame();
  __ cmp(R0, Operand(0));
  __ b(&done);

  __ Bind(&reference_compare);
  __ cmp(left, Operand(right));
  __ Bind(&done);
}
(... skipping 270 matching lines ...)
}


void StubCode::GenerateFrameAwaitingMaterializationStub(Assembler* assembler) {
  __ bkpt(0);
}

}  // namespace dart

#endif  // defined TARGET_ARCH_ARM