Chromium Code Reviews

Unified Diff: runtime/vm/stub_code_arm64.cc

Issue 253623003: Enables all startup code for arm64. (Closed)
Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 6 years, 7 months ago
 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.

 #include "vm/globals.h"
 #if defined(TARGET_ARCH_ARM64)

 #include "vm/assembler.h"
 #include "vm/code_generator.h"
 #include "vm/compiler.h"
(...skipping 26 matching lines...)

   __ SetPrologueOffset();
   __ Comment("CallToRuntimeStub");
   __ EnterFrame(0);

   // Load current Isolate pointer from Context structure into R0.
   __ LoadFieldFromOffset(R0, CTX, Context::isolate_offset());

   // Save exit frame information to enable stack walking as we are about
   // to transition to Dart VM C++ code.
-  __ StoreToOffset(SP, R0, Isolate::top_exit_frame_info_offset());
+  __ mov(TMP, SP);  // Can't directly store SP.
+  __ StoreToOffset(TMP, R0, Isolate::top_exit_frame_info_offset());

   // Save current Context pointer into Isolate structure.
   __ StoreToOffset(CTX, R0, Isolate::top_context_offset());

   // Cache Isolate pointer into CTX while executing runtime code.
   __ mov(CTX, R0);

 #if defined(DEBUG)
   { Label ok;
     // Check that we are always entering from Dart code.
(...skipping 68 matching lines...)
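Note on the change above: ARM64 gives SP and ZR the same register encoding, so the assembler's StoreToOffset cannot name SP as the value to store; the stub must first copy SP into the scratch register TMP. What the sequence records is the Dart-to-VM boundary bookkeeping; a minimal C++ sketch, with an illustrative struct standing in for the VM's Isolate fields:

#include <cstdint>

// Illustrative stand-in for the two isolate fields written above; the
// real offsets come from Isolate::top_exit_frame_info_offset() and
// Isolate::top_context_offset().
struct IsolateBoundarySketch {
  uintptr_t top_exit_frame_info;  // saved SP: lets stack walkers find frames
  uintptr_t top_context;          // saved CTX across the runtime call
};

inline void EnterRuntimeSketch(IsolateBoundarySketch* isolate,
                               uintptr_t sp, uintptr_t ctx) {
  isolate->top_exit_frame_info = sp;  // mov TMP, SP; str TMP, [R0, #offset]
  isolate->top_context = ctx;         // str CTX, [R0, #offset]
}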
 void StubCode::GeneratePrintStopMessageStub(Assembler* assembler) {
   __ Stop("GeneratePrintStopMessageStub");
 }


 void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) {
   __ Stop("GenerateCallNativeCFunctionStub");
 }


+// Input parameters:
+//   LR : return address.
+//   SP : address of return value.
+//   R5 : address of the native function to call.
+//   R2 : address of first argument in argument array.
+//   R1 : argc_tag including number of arguments and function kind.
 void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) {
-  __ Stop("GenerateCallBootstrapCFunctionStub");
+  const intptr_t isolate_offset = NativeArguments::isolate_offset();
+  const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
+  const intptr_t argv_offset = NativeArguments::argv_offset();
+  const intptr_t retval_offset = NativeArguments::retval_offset();
+
+  __ EnterFrame(0);
+
+  // Load current Isolate pointer from Context structure into R0.
+  __ LoadFieldFromOffset(R0, CTX, Context::isolate_offset());
+
+  // Save exit frame information to enable stack walking as we are about
+  // to transition to native code.
+  __ mov(TMP, SP);  // Can't store SP directly, first copy to TMP.
+  __ StoreToOffset(TMP, R0, Isolate::top_exit_frame_info_offset());
+
+  // Save current Context pointer into Isolate structure.
+  __ StoreToOffset(CTX, R0, Isolate::top_context_offset());
+
+  // Cache Isolate pointer into CTX while executing native code.
+  __ mov(CTX, R0);
+
+#if defined(DEBUG)
+  { Label ok;
+    // Check that we are always entering from Dart code.
+    __ LoadFromOffset(R6, CTX, Isolate::vm_tag_offset());
+    __ CompareImmediate(R6, VMTag::kScriptTagId, PP);
+    __ b(&ok, EQ);
+    __ Stop("Not coming from Dart code.");
+    __ Bind(&ok);
+  }
+#endif
+
+  // Mark that the isolate is executing Native code.
+  __ StoreToOffset(R5, CTX, Isolate::vm_tag_offset());
+
+  // Reserve space for the native arguments structure passed on the stack (the
+  // outgoing pointer parameter to the native arguments structure is passed in
+  // R0) and align frame before entering the C++ world.
+  __ ReserveAlignedFrameSpace(sizeof(NativeArguments));
+
+  // Initialize NativeArguments structure and call native function.
+  // Registers R0, R1, R2, and R3 are used.
+
+  ASSERT(isolate_offset == 0 * kWordSize);
+  // Set isolate in NativeArgs: R0 already contains CTX.
+
+  // There are no native calls to closures, so we do not need to set the tag
+  // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
+  ASSERT(argc_tag_offset == 1 * kWordSize);
+  // Set argc in NativeArguments: R1 already contains argc.
+
+  ASSERT(argv_offset == 2 * kWordSize);
+  // Set argv in NativeArguments: R2 already contains argv.
+
+  // Set retval in NativeArgs.
+  ASSERT(retval_offset == 3 * kWordSize);
+  __ AddImmediate(R3, FP, 2 * kWordSize, PP);
+
+  // TODO(regis): Should we pass the structure by value as in runtime calls?
+  // It would require changing Dart API for native functions.
+  // For now, space is reserved on the stack and we pass a pointer to it.
+  __ StoreToOffset(R0, SP, isolate_offset);
+  __ StoreToOffset(R1, SP, argc_tag_offset);
+  __ StoreToOffset(R2, SP, argv_offset);
+  __ StoreToOffset(R3, SP, retval_offset);
+  __ mov(R0, SP);  // Pass the pointer to the NativeArguments.
+
+  // Call native function or redirection via simulator.
+  __ blr(R5);
+
+  // Mark that the isolate is executing Dart code.
+  __ LoadImmediate(R2, VMTag::kScriptTagId, PP);
+  __ StoreToOffset(R2, CTX, Isolate::vm_tag_offset());
+
+  // Reset exit frame information in Isolate structure.
+  __ LoadImmediate(R2, 0, PP);
+  __ StoreToOffset(R2, CTX, Isolate::top_exit_frame_info_offset());
+
+  // Load Context pointer from Isolate structure into R2.
+  __ LoadFromOffset(R2, CTX, Isolate::top_context_offset());
+
+  // Reset Context pointer in Isolate structure.
+  __ LoadObject(R3, Object::null_object(), PP);
+  __ StoreToOffset(R3, CTX, Isolate::top_context_offset());
+
+  // Cache Context pointer into CTX while executing Dart code.
+  __ mov(CTX, R2);
+
+  __ LeaveFrame();
+  __ ret();
 }
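The four ASSERTs in the new body pin the NativeArguments field order so the stub can fill the structure with plain word stores off SP. A sketch of the assumed layout, as illustrative C++ rather than the VM's actual class:

#include <cstddef>
#include <cstdint>

// One word per field, in the order the stub's ASSERTs require.
struct NativeArgumentsSketch {
  uintptr_t isolate;   // 0 * kWordSize: current isolate (from R0/CTX)
  uintptr_t argc_tag;  // 1 * kWordSize: argument count plus kind bits (R1)
  uintptr_t argv;      // 2 * kWordSize: address of first argument (R2)
  uintptr_t retval;    // 3 * kWordSize: address of return-value slot (R3)
};

static_assert(offsetof(NativeArgumentsSketch, argc_tag) == sizeof(uintptr_t),
              "field order must match the stub's stores");
static_assert(offsetof(NativeArgumentsSketch, retval) == 3 * sizeof(uintptr_t),
              "field order must match the stub's stores");

The stub then passes a pointer to this block in R0, which is what the TODO about by-value passing refers to.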


 void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) {
   __ Stop("GenerateCallStaticFunctionStub");
 }


 void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) {
   __ Stop("GenerateFixCallersTargetStub");
(...skipping 261 matching lines...)
   Register ic_reg = R5;
   Register func_reg = temp_reg;
   ASSERT(temp_reg == R6);
   __ LoadFieldFromOffset(func_reg, ic_reg, ICData::function_offset());
   __ LoadFieldFromOffset(R7, func_reg, Function::usage_counter_offset());
   __ AddImmediate(R7, R7, 1, PP);
   __ StoreFieldToOffset(R7, func_reg, Function::usage_counter_offset());
 }
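GenerateUsageCounterIncrement (context above) emits a load/add/store on the function referenced by the IC data; in plain C++ the effect is roughly the following, with illustrative stand-in types for the raw objects the registers point at:

#include <cstdint>

struct FunctionSketch { intptr_t usage_counter; };  // Function::usage_counter_offset()
struct ICDataSketch { FunctionSketch* function; };  // ICData::function_offset()

// Bump the owning function's usage counter; the counter feeds the
// optimizing compiler's hotness heuristic.
inline void BumpUsageCounter(ICDataSketch* ic_data) {
  ic_data->function->usage_counter += 1;
}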


+// Generate inline cache check for 'num_args'.
+//  LR: return address.
+//  R5: inline cache data object.
+// Control flow:
+// - If receiver is null -> jump to IC miss.
+// - If receiver is Smi -> load Smi class.
+// - If receiver is not-Smi -> load receiver's class.
+// - Check if 'num_args' (including receiver) match any IC data group.
+// - Match found -> jump to target.
+// - Match not found -> jump to IC miss.
 void StubCode::GenerateNArgsCheckInlineCacheStub(
     Assembler* assembler,
     intptr_t num_args,
     const RuntimeEntry& handle_ic_miss) {
-  __ Stop("GenerateNArgsCheckInlineCacheStub");
+  ASSERT(num_args > 0);
+#if defined(DEBUG)
+  { Label ok;
+    // Check that the IC data array has NumberOfArgumentsChecked() == num_args.
+    // 'num_args_tested' is stored as an untagged int.
+    __ LoadFieldFromOffset(R6, R5, ICData::num_args_tested_offset());
+    __ CompareImmediate(R6, num_args, PP);
+    __ b(&ok, EQ);
+    __ Stop("Incorrect stub for IC data");
+    __ Bind(&ok);
+  }
+#endif  // DEBUG
+
+  // Check single stepping.
+  Label not_stepping;
+  __ LoadFieldFromOffset(R6, CTX, Context::isolate_offset());
+  __ LoadFromOffset(R6, R6, Isolate::single_step_offset(), kUnsignedByte);
+  __ CompareImmediate(R6, 0, PP);
+  __ b(&not_stepping, EQ);
+  __ EnterStubFrame();
+  __ Push(R5);  // Preserve IC data.
+  __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
+  __ Pop(R5);
+  __ LeaveStubFrame();
+  __ Bind(&not_stepping);
+
+  // Load arguments descriptor into R4.
+  __ LoadFieldFromOffset(R4, R5, ICData::arguments_descriptor_offset());
+  // Loop that checks if there is an IC data match.
+  Label loop, update, test, found, get_class_id_as_smi;
+  // R5: IC data object (preserved).
+  __ LoadFieldFromOffset(R6, R5, ICData::ic_data_offset());
+  // R6: ic_data_array with check entries: classes and target functions.
+  __ AddImmediate(R6, R6, Array::data_offset() - kHeapObjectTag, PP);
+  // R6: points directly to the first ic data array element.
+
+  // Get the receiver's class ID (first read number of arguments from
+  // arguments descriptor array and then access the receiver from the stack).
+  __ LoadFieldFromOffset(R7, R4, ArgumentsDescriptor::count_offset());
+  __ SmiUntag(R7);  // Untag so we can use the LSL 3 addressing mode.
+  __ sub(R7, R7, Operand(1));
+
+  // R0 <- [SP + (R7 << 3)]
+  __ ldr(R0, Address(SP, R7, UXTX, Address::Scaled));
+
+  {
+    // TODO(zra): Put this code in a subroutine call as with other
+    // architectures when we have a bl(Label& l) instruction.
+    // Instance in R0, return its class-id in R0 as Smi.
+    // Test if Smi -> load Smi class for comparison.
+    Label not_smi, done;
+    __ tsti(R0, kSmiTagMask);
+    __ b(&not_smi, NE);
+    __ LoadImmediate(R0, Smi::RawValue(kSmiCid), PP);
+    __ b(&done);
+
+    __ Bind(&not_smi);
+    __ LoadClassId(R0, R0);
+    __ SmiTag(R0);
+    __ Bind(&done);
+  }
+
+  // R7: argument_count - 1 (untagged).
+  // R0: receiver's class ID (smi).
+  __ ldr(R1, Address(R6));  // First class id (smi) to check.
+  __ b(&test);
+
+  __ Bind(&loop);
+  for (int i = 0; i < num_args; i++) {
+    if (i > 0) {
+      // If not the first, load the next argument's class ID.
+      __ AddImmediate(R0, R7, -i, PP);
+      // R0 <- [SP + (R0 << 3)]
+      __ ldr(R0, Address(SP, R0, UXTX, Address::Scaled));
+      {
+        // Instance in R0, return its class-id in R0 as Smi.
+        // Test if Smi -> load Smi class for comparison.
+        Label not_smi, done;
+        __ tsti(R0, kSmiTagMask);
+        __ b(&not_smi, NE);
+        __ LoadImmediate(R0, Smi::RawValue(kSmiCid), PP);
+        __ b(&done);
+
+        __ Bind(&not_smi);
+        __ LoadClassId(R0, R0);
+        __ SmiTag(R0);
+        __ Bind(&done);
+      }
+      // R0: next argument class ID (smi).
+      __ LoadFromOffset(R1, R6, i * kWordSize);
+      // R1: next class ID to check (smi).
+    }
+    __ CompareRegisters(R0, R1);  // Class id match?
+    if (i < (num_args - 1)) {
+      __ b(&update, NE);  // Continue.
+    } else {
+      // Last check, all checks before matched.
+      __ b(&found, EQ);  // Break.
+    }
+  }
+  __ Bind(&update);
+  // Reload receiver class ID. It has not been destroyed when num_args == 1.
+  if (num_args > 1) {
+    __ ldr(R0, Address(SP, R7, UXTX, Address::Scaled));
+    {
+      // Instance in R0, return its class-id in R0 as Smi.
+      // Test if Smi -> load Smi class for comparison.
+      Label not_smi, done;
+      __ tsti(R0, kSmiTagMask);
+      __ b(&not_smi, NE);
+      __ LoadImmediate(R0, Smi::RawValue(kSmiCid), PP);
+      __ b(&done);
+
+      __ Bind(&not_smi);
+      __ LoadClassId(R0, R0);
+      __ SmiTag(R0);
+      __ Bind(&done);
+    }
+  }
+
+  const intptr_t entry_size = ICData::TestEntryLengthFor(num_args) * kWordSize;
+  __ AddImmediate(R6, R6, entry_size, PP);  // Next entry.
+  __ ldr(R1, Address(R6));  // Next class ID.
+
+  __ Bind(&test);
+  __ CompareImmediate(R1, Smi::RawValue(kIllegalCid), PP);  // Done?
+  __ b(&loop, NE);
+
+  // IC miss.
+  // Compute address of arguments.
+  // R7: argument_count - 1 (untagged).
+  // R7 <- SP + (R7 << 3)
+  __ add(R7, SP, Operand(R7, UXTX, 3));  // R7 is untagged.
+  // R7: address of receiver.
+  // Create a stub frame as we are pushing some objects on the stack before
+  // calling into the runtime.
+  __ EnterStubFrame();
+  __ LoadObject(R0, Object::null_object(), PP);
+  // Preserve IC data object and arguments descriptor array and
+  // setup space on stack for result (target code object).
+  __ Push(R4);  // Preserve arguments descriptor array.
+  __ Push(R5);  // Preserve IC Data.
+  __ Push(R0);  // Setup space on stack for the result (target code object).
+  // Push call arguments.
+  for (intptr_t i = 0; i < num_args; i++) {
+    __ LoadFromOffset(TMP, R7, -i * kWordSize);
+    __ Push(TMP);
+  }
+  // Pass IC data object.
+  __ Push(R5);
+  __ CallRuntime(handle_ic_miss, num_args + 1);
+  // Remove the call arguments pushed earlier, including the IC data object.
+  __ Drop(num_args + 1);
+  // Restore arguments descriptor array and IC data array.
+  __ Pop(R0);  // Pop returned function object into R0.
+  __ Pop(R5);  // Restore IC Data.
+  __ Pop(R4);  // Restore arguments descriptor array.
+  __ LeaveStubFrame();
+  Label call_target_function;
+  __ b(&call_target_function);
+
+  __ Bind(&found);
+  // R6: pointer to an IC data check group.
+  const intptr_t target_offset = ICData::TargetIndexFor(num_args) * kWordSize;
+  const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
+  __ LoadFromOffset(R0, R6, target_offset);
+  __ LoadFromOffset(R1, R6, count_offset);
+  __ adds(R1, R1, Operand(Smi::RawValue(1)));
+  __ StoreToOffset(R1, R6, count_offset);
+  __ b(&call_target_function, VC);  // No overflow.
+  __ LoadImmediate(R1, Smi::RawValue(Smi::kMaxValue), PP);
+  __ StoreToOffset(R1, R6, count_offset);
+
+  __ Bind(&call_target_function);
+  // R0: target function.
+  __ LoadFieldFromOffset(R2, R0, Function::code_offset());
+  __ LoadFieldFromOffset(R2, R2, Code::instructions_offset());
+  __ AddImmediate(R2, R2, Instructions::HeaderSize() - kHeapObjectTag, PP);
+  __ br(R2);
 }
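The "class-id as Smi" block is emitted three times above because, as the TODO(zra) notes, there is no bl(Label) yet to share it as a local subroutine. The block exists because Smis carry no class header: the VM tags a Smi with a 0 low bit and stores the value shifted left by one, so the Smi class id must be materialized rather than loaded. A hedged C++ rendering (constant values are illustrative; only the tagging scheme is assumed):

#include <cstdint>

constexpr uintptr_t kSmiTagMaskSketch = 0x1;  // low bit 0 => Smi
constexpr intptr_t kSmiCidSketch = 1;         // illustrative class id value

inline intptr_t SmiTagSketch(intptr_t value) { return value << 1; }

// Instance word in, class id out as a tagged Smi, mirroring the
// tsti / LoadClassId / SmiTag sequence. The function pointer stands in
// for the class-id load from a heap object's header.
inline intptr_t ClassIdAsSmi(uintptr_t instance,
                             intptr_t (*load_heap_cid)(uintptr_t)) {
  if ((instance & kSmiTagMaskSketch) == 0) {
    return SmiTagSketch(kSmiCidSketch);  // Smi::RawValue(kSmiCid)
  }
  return SmiTagSketch(load_heap_cid(instance));
}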


+// Use inline cache data array to invoke the target or continue in inline
+// cache miss handler. Stub for 1-argument check (receiver class).
+//  LR: return address.
+//  R5: inline cache data object.
+// Inline cache data object structure:
+// 0: function-name
+// 1: N, number of arguments checked.
+// 2 .. (length - 1): group of checks, each check containing:
+//   - N classes.
+//   - 1 target function.
 void StubCode::GenerateOneArgCheckInlineCacheStub(Assembler* assembler) {
-  __ Stop("GenerateOneArgCheckInlineCacheStub");
+  GenerateUsageCounterIncrement(assembler, R6);
+  GenerateNArgsCheckInlineCacheStub(
+      assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry);
 }
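Given the IC data layout documented above (each check group holds N class ids, then a target function, then a call count, with kIllegalCid terminating the list), the loop emitted by GenerateNArgsCheckInlineCacheStub behaves like this C++ sketch over a hypothetical flat array; the saturating count bump mirrors the adds / b(VC) sequence:

#include <cstdint>

// Returns the target slot of the matching group, or nullptr on an IC
// miss (where the stub calls handle_ic_miss instead).
inline intptr_t* FindICTarget(intptr_t* entries, intptr_t num_args,
                              const intptr_t* arg_cids, intptr_t illegal_cid) {
  const intptr_t entry_len = num_args + 2;  // cf. ICData::TestEntryLengthFor
  for (intptr_t* e = entries; e[0] != illegal_cid; e += entry_len) {
    bool match = true;
    for (intptr_t i = 0; i < num_args; i++) {
      if (e[i] != arg_cids[i]) { match = false; break; }
    }
    if (!match) continue;
    intptr_t* count = &e[num_args + 1];
    if (*count < INTPTR_MAX) (*count)++;  // saturate instead of overflowing
    return &e[num_args];                  // target function slot
  }
  return nullptr;
}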


 void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) {
-  __ Stop("GenerateTwoArgsCheckInlineCacheStub");
+  GenerateUsageCounterIncrement(assembler, R6);
+  GenerateNArgsCheckInlineCacheStub(
+      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry);
 }


 void StubCode::GenerateThreeArgsCheckInlineCacheStub(Assembler* assembler) {
   __ Stop("GenerateThreeArgsCheckInlineCacheStub");
 }


 void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub(
     Assembler* assembler) {
   __ Stop("GenerateOneArgOptimizedCheckInlineCacheStub");
 }

(...skipping 78 matching lines...)
   __ AddImmediate(R2, R2, Instructions::HeaderSize() - kHeapObjectTag, PP);
   __ br(R2);
 }


 void StubCode::GenerateTwoArgsUnoptimizedStaticCallStub(Assembler* assembler) {
   __ Stop("GenerateTwoArgsUnoptimizedStaticCallStub");
 }


+// Stub for compiling a function and jumping to the compiled code.
+// R5: IC-Data (for methods).
+// R4: Arguments descriptor.
+// R0: Function.
 void StubCode::GenerateLazyCompileStub(Assembler* assembler) {
-  __ Stop("GenerateLazyCompileStub");
+  // Preserve arg desc. and IC data object.
+  __ EnterStubFrame();
+  __ Push(R5);  // Save IC Data.
+  __ Push(R4);  // Save arg. desc.
+  __ Push(R0);  // Pass function.
+  __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
+  __ Pop(R0);  // Restore function.
+  __ Pop(R4);  // Restore arg desc.
+  __ Pop(R5);  // Restore IC Data.
+  __ LeaveStubFrame();
+
+  __ LoadFieldFromOffset(R2, R0, Function::code_offset());
+  __ LoadFieldFromOffset(R2, R2, Code::instructions_offset());
+  __ AddImmediate(R2, R2, Instructions::HeaderSize() - kHeapObjectTag, PP);
+  __ br(R2);
 }
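The lazy-compile stub is a thin wrapper around a single runtime transition: save the caller's IC data and arguments descriptor, compile the function, then tail-jump to the fresh code. A control-flow sketch with hypothetical helpers standing in for kCompileFunctionRuntimeEntry and the Function -> Code -> Instructions indirection:

using DartEntry = void (*)();

// Hypothetical stand-ins so the sketch is self-contained.
static void CompileFunction(void* /*function*/) {}  // the runtime entry
static void CompiledCode() {}
static DartEntry EntryPointOf(void* /*function*/) { return &CompiledCode; }

// R4/R5 survive because the stub frame pushes and pops them around the
// (allocating) runtime call; the caller then branches via br R2.
static DartEntry LazyCompile(void* function) {
  CompileFunction(function);
  return EntryPointOf(function);
}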


 void StubCode::GenerateBreakpointRuntimeStub(Assembler* assembler) {
   __ Stop("GenerateBreakpointRuntimeStub");
 }


-void StubCode::GenerateDebugStepCheckStub(Assembler* assembler) {
-  __ Stop("GenerateDebugStepCheckStub");
+// Called only from unoptimized code. All relevant registers have been saved.
+void StubCode::GenerateDebugStepCheckStub(
+    Assembler* assembler) {
+  // Check single stepping.
+  Label not_stepping;
+  __ LoadFieldFromOffset(R1, CTX, Context::isolate_offset());
+  __ LoadFromOffset(R1, R1, Isolate::single_step_offset(), kUnsignedByte);
+  __ CompareImmediate(R1, 0, PP);
+  __ b(&not_stepping, EQ);
+  __ EnterStubFrame();
+  __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
+  __ LeaveStubFrame();
+  __ Bind(&not_stepping);
+  __ ret();
 }
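This stub shares its single-stepping test with the inline-cache stub above: a byte-wide flag on the isolate gates the (comparatively expensive) transition to the debugger's runtime handler, keeping the common path to a load, a compare, and a branch. Roughly, with an illustrative isolate type:

#include <cstdint>

struct IsolateSketch { uint8_t single_step; };  // cf. Isolate::single_step_offset()

inline void DebugStepCheck(const IsolateSketch* isolate, void (*handler)()) {
  if (isolate->single_step != 0) {
    handler();  // kSingleStepHandlerRuntimeEntry, inside a stub frame
  }
}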


 void StubCode::GenerateSubtype1TestCacheStub(Assembler* assembler) {
   __ Stop("GenerateSubtype1TestCacheStub");
 }


 void StubCode::GenerateSubtype2TestCacheStub(Assembler* assembler) {
   __ Stop("GenerateSubtype2TestCacheStub");
(...skipping 13 matching lines...)
 void StubCode::GenerateJumpToExceptionHandlerStub(Assembler* assembler) {
   __ Stop("GenerateJumpToExceptionHandlerStub");
 }


 void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) {
   __ Stop("GenerateOptimizeFunctionStub");
 }


+DECLARE_LEAF_RUNTIME_ENTRY(intptr_t,
+                           BigintCompare,
+                           RawBigint* left,
+                           RawBigint* right);
+
+
+// Does identical check (object references are equal or not equal) with special
+// checks for boxed numbers.
+// Left and right are pushed on stack.
+// Return Zero condition flag set if equal.
+// Note: A Mint cannot contain a value that would fit in a Smi, and a Bigint
+// cannot contain a value that fits in a Mint or a Smi.
 void StubCode::GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
                                                     const Register left,
                                                     const Register right,
-                                                    const Register temp,
-                                                    const Register unused) {
-  __ Stop("GenerateIdenticalWithNumberCheckStub");
+                                                    const Register unused1,
+                                                    const Register unused2) {
+  Label reference_compare, done, check_mint, check_bigint;
+  // If any of the arguments is Smi do reference compare.
+  __ tsti(left, kSmiTagMask);
+  __ b(&reference_compare, EQ);
+  __ tsti(right, kSmiTagMask);
+  __ b(&reference_compare, EQ);
+
+  // Value compare for two doubles.
+  __ CompareClassId(left, kDoubleCid);
+  __ b(&check_mint, NE);
+  __ CompareClassId(right, kDoubleCid);
+  __ b(&done, NE);
+
+  // Double values bitwise compare.
+  __ LoadFieldFromOffset(left, left, Double::value_offset());
+  __ LoadFieldFromOffset(right, right, Double::value_offset());
+  __ CompareRegisters(left, right);
+  __ b(&done);
+
+  __ Bind(&check_mint);
+  __ CompareClassId(left, kMintCid);
+  __ b(&check_bigint, NE);
+  __ CompareClassId(right, kMintCid);
+  __ b(&done, NE);
+  __ LoadFieldFromOffset(left, left, Mint::value_offset());
+  __ LoadFieldFromOffset(right, right, Mint::value_offset());
+  __ CompareRegisters(left, right);
+  __ b(&done);
+
+  __ Bind(&check_bigint);
+  __ CompareClassId(left, kBigintCid);
+  __ b(&reference_compare, NE);
+  __ CompareClassId(right, kBigintCid);
+  __ b(&done, NE);
+  __ EnterFrame(0);
+  __ ReserveAlignedFrameSpace(2 * kWordSize);
+  __ StoreToOffset(left, SP, 0 * kWordSize);
+  __ StoreToOffset(right, SP, 1 * kWordSize);
+  __ CallRuntime(kBigintCompareRuntimeEntry, 2);
+  // Result in R0, 0 means equal.
+  __ LeaveFrame();
+  __ cmp(R0, Operand(0));
+  __ b(&done);
+
+  __ Bind(&reference_compare);
+  __ CompareRegisters(left, right);
+  __ Bind(&done);
 }
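The number checks above implement Dart's identical() semantics for boxed numbers: doubles are compared by bit pattern (so a NaN is identical to itself and -0.0 is not identical to 0.0), mints by value, and bigints through the runtime. A standalone sketch of the double case in standard C++:

#include <cstdint>
#include <cstring>

// Bitwise comparison of the raw 64-bit payloads, matching the stub's
// integer load-and-compare rather than a floating-point compare.
inline bool IdenticalDoubles(double a, double b) {
  uint64_t raw_a, raw_b;
  std::memcpy(&raw_a, &a, sizeof(raw_a));
  std::memcpy(&raw_b, &b, sizeof(raw_b));
  return raw_a == raw_b;
}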


+// Called only from unoptimized code. All relevant registers have been saved.
+// LR: return address.
+// SP + 8: left operand.
+// SP + 0: right operand.
+// Return Zero condition flag set if equal.
 void StubCode::GenerateUnoptimizedIdenticalWithNumberCheckStub(
     Assembler* assembler) {
-  __ Stop("GenerateUnoptimizedIdenticalWithNumberCheckStub");
+  // Check single stepping.
+  Label not_stepping;
+  __ LoadFieldFromOffset(R1, CTX, Context::isolate_offset());
+  __ LoadFromOffset(R1, R1, Isolate::single_step_offset(), kUnsignedByte);
+  __ CompareImmediate(R1, 0, PP);
+  __ b(&not_stepping, EQ);
+  __ EnterStubFrame();
+  __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
+  __ LeaveStubFrame();
+  __ Bind(&not_stepping);
+
+  const Register left = R1;
+  const Register right = R0;
+  __ LoadFromOffset(left, SP, 1 * kWordSize);
+  __ LoadFromOffset(right, SP, 0 * kWordSize);
+  GenerateIdenticalWithNumberCheckStub(assembler, left, right);
+  __ ret();
 }


 void StubCode::GenerateOptimizedIdenticalWithNumberCheckStub(
     Assembler* assembler) {
   __ Stop("GenerateOptimizedIdenticalWithNumberCheckStub");
 }

 }  // namespace dart

 #endif  // defined TARGET_ARCH_ARM64
