Chromium Code Reviews

Unified diff: runtime/vm/stub_code_mips.cc (changed lines shown as - old / + new)

Issue 59613005: Merge (x & y) == 0 pattern to emit a single test instruction. (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 7 years, 1 month ago
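What the title means in MIPS terms, as a minimal sketch (illustrative register choices, not code from this patch): MIPS has no flags register, so a "test" is an AND into a compare register followed by a branch against ZR. Merging the pattern drops the separate compare of the AND's result:

    // One plausible unmerged sequence: the AND produces a value, then a
    // separate compare tests it against zero.
    __ and_(T0, A0, A1);         // t = x & y
    __ subu(CMPRES1, T0, ZR);    // compare t with zero
    __ beq(CMPRES1, ZR, &is_zero);

    // Merged: one instruction both computes and tests the value.
    __ and_(CMPRES1, A0, A1);
    __ beq(CMPRES1, ZR, &is_zero);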
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file.
4
5 #include "vm/globals.h"
6 #if defined(TARGET_ARCH_MIPS)
7
8 #include "vm/assembler.h"
9 #include "vm/code_generator.h"
10 #include "vm/compiler.h"
(...skipping 395 matching lines...)
406 // S4: arguments descriptor array.
407 // Note: The receiver object is the first argument to the function being
408 // called, the stub accesses the receiver from this location directly
409 // when trying to resolve the call.
410 void StubCode::GenerateInstanceFunctionLookupStub(Assembler* assembler) {
411 __ TraceSimMsg("InstanceFunctionLookupStub");
412 __ EnterStubFrame();
413
414 // Load the receiver.
415 __ lw(A1, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
416 - __ sll(TMP1, A1, 1); // A1 is Smi.
416 + __ sll(TMP, A1, 1); // A1 is Smi.
417 - __ addu(TMP1, FP, TMP1);
417 + __ addu(TMP, FP, TMP);
418 - __ lw(T1, Address(TMP1, kParamEndSlotFromFp * kWordSize));
418 + __ lw(T1, Address(TMP, kParamEndSlotFromFp * kWordSize));
419
420 // Push space for the return value.
421 // Push the receiver.
422 - // Push TMP1 data object.
422 + // Push TMP data object.
423 // Push arguments descriptor array.
424 __ addiu(SP, SP, Immediate(-4 * kWordSize));
425 __ LoadImmediate(TMP, reinterpret_cast<intptr_t>(Object::null()));
426 __ sw(TMP, Address(SP, 3 * kWordSize));
427 __ sw(T1, Address(SP, 2 * kWordSize));
428 __ sw(S5, Address(SP, 1 * kWordSize));
429 __ sw(S4, Address(SP, 0 * kWordSize));
430
431 // A1: Smi-tagged arguments array length.
432 PushArgumentsArray(assembler);
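Why a single shift suffices at line 416: with kSmiTagShift == 1 a Smi stores value << 1, and kWordSize == 4 on MIPS32, so shifting the raw Smi left once more yields the byte offset value * kWordSize. A worked example (assumed constants):

    // raw_smi     = count << kSmiTagShift = count << 1
    // byte_offset = raw_smi << 1 = count << 2 = count * kWordSize
    // e.g. count == 3:  raw_smi == 6,  6 << 1 == 12 == 3 * 4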
(...skipping 208 matching lines...)
641 // NOTE: A1 cannot be clobbered here as the caller relies on it being saved.
642 // The newly allocated object is returned in V0.
643 void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
644 __ TraceSimMsg("AllocateArrayStub");
645 Label slow_case;
646 if (FLAG_inline_alloc) {
647 // Compute the size to be allocated, it is based on the array length
648 // and is computed as:
649 // RoundedAllocationSize((array_length * kWordSize) + sizeof(RawArray)).
650 // Assert that length is a Smi.
651 - __ andi(CMPRES, A1, Immediate(kSmiTagMask));
651 + __ andi(CMPRES1, A1, Immediate(kSmiTagMask));
652 if (FLAG_use_slow_path) {
653 __ b(&slow_case);
654 } else {
655 - __ bne(CMPRES, ZR, &slow_case);
655 + __ bne(CMPRES1, ZR, &slow_case);
656 }
657 __ lw(T0, FieldAddress(CTX, Context::isolate_offset()));
658 __ lw(T0, Address(T0, Isolate::heap_offset()));
659 __ lw(T0, Address(T0, Heap::new_space_offset()));
660
661 // Calculate and align allocation size.
662 // Load new object start and calculate next object start.
663 // A0: array element type.
664 // A1: Array length as Smi.
665 // T0: Points to new space object.
666 __ lw(V0, Address(T0, Scavenger::top_offset()));
667 intptr_t fixed_size = sizeof(RawArray) + kObjectAlignment - 1;
668 __ LoadImmediate(T3, fixed_size);
669 - __ sll(TMP1, A1, 1); // A1 is Smi.
669 + __ sll(TMP, A1, 1); // A1 is Smi.
670 - __ addu(T3, T3, TMP1);
670 + __ addu(T3, T3, TMP);
671 ASSERT(kSmiTagShift == 1);
672 - __ LoadImmediate(TMP1, ~(kObjectAlignment - 1));
672 + __ LoadImmediate(TMP, ~(kObjectAlignment - 1));
673 - __ and_(T3, T3, TMP1);
673 + __ and_(T3, T3, TMP);
674 __ addu(T2, T3, V0);
675
676 // Check if the allocation fits into the remaining space.
677 // V0: potential new object start.
678 // A0: array element type.
679 // A1: array length as Smi.
680 // T0: points to new space object.
681 // T2: potential next object start.
682 // T3: array size.
683 __ lw(CMPRES1, Address(T0, Scavenger::end_offset()));
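A self-contained sketch of the rounding that lines 667-673 implement, assuming kObjectAlignment == 8 and kWordSize == 4 (MIPS32); kRawArrayHeaderSize stands in for sizeof(RawArray), and the real code keeps everything in registers:

    #include <cstdint>

    static const intptr_t kWordSize = 4;
    static const intptr_t kObjectAlignment = 8;
    static const intptr_t kRawArrayHeaderSize = 12;  // stand-in value

    intptr_t RoundedArrayAllocationSize(intptr_t length) {
      // Over-estimate by (alignment - 1), then mask down: the net effect is
      // rounding header + payload up to the next alignment boundary.
      intptr_t fixed_size = kRawArrayHeaderSize + kObjectAlignment - 1;
      intptr_t size = fixed_size + length * kWordSize;
      return size & ~(kObjectAlignment - 1);
    }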
(...skipping 25 matching lines...)
709 A1);
710
711 // Calculate the size tag.
712 // V0: new object start as a tagged pointer.
713 // A1: Array length as Smi.
714 // T2: new object end address.
715 // T3: array size.
716 const intptr_t shift = RawObject::kSizeTagBit - kObjectAlignmentLog2;
717 // If no size tag overflow, shift T3 left, else set T3 to zero.
718 __ LoadImmediate(T4, RawObject::SizeTag::kMaxSizeTag);
719 - __ sltu(CMPRES, T4, T3); // CMPRES = T4 < T3 ? 1 : 0
719 + __ sltu(CMPRES1, T4, T3); // CMPRES1 = T4 < T3 ? 1 : 0
720 - __ sll(TMP1, T3, shift); // TMP1 = T3 << shift;
720 + __ sll(TMP, T3, shift); // TMP = T3 << shift;
721 - __ movz(T3, TMP1, CMPRES); // T3 = T4 >= T3 ? TMP1 : T3
721 + __ movz(T3, TMP, CMPRES1); // T3 = T4 >= T3 ? TMP : T3
722 - __ movn(T3, ZR, CMPRES); // T3 = T4 < T3 ? 0 : T3
722 + __ movn(T3, ZR, CMPRES1); // T3 = T4 < T3 ? 0 : T3
723
724 // Get the class index and insert it into the tags.
725 - __ LoadImmediate(TMP1, RawObject::ClassIdTag::encode(kArrayCid));
725 + __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(kArrayCid));
726 - __ or_(T3, T3, TMP1);
726 + __ or_(T3, T3, TMP);
727 __ sw(T3, FieldAddress(V0, Array::tags_offset()));
728
729 // Initialize all array elements to raw_null.
730 // V0: new object start as a tagged pointer.
731 // T2: new object end address.
732 // A1: Array length as Smi.
733 __ AddImmediate(T3, V0, Array::data_offset() - kHeapObjectTag);
734 // T3: iterator which initially points to the start of the variable
735 // data area to be initialized.
736
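The sltu/sll/movz/movn sequence at lines 719-722 is a branchless select; movz/movn conditionally move depending on whether their third operand is zero. In C terms (a sketch, assumed unsigned types):

    #include <cstdint>

    uint32_t SizeTagField(uint32_t size, int shift, uint32_t max_size_tag) {
      // Sizes too large for the tag get 0; the heap reads the real size
      // elsewhere. Otherwise the size is stored shifted into the tag bits.
      return (size > max_size_tag) ? 0u : (size << shift);
    }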
(...skipping 42 matching lines...)
779 // SP: address of last argument.
780 // S4: Arguments descriptor array.
781 // Return: V0.
782 // Note: The closure object is the first argument to the function being
783 // called, the stub accesses the closure from this location directly
784 // when trying to resolve the call.
785 void StubCode::GenerateCallClosureFunctionStub(Assembler* assembler) {
786 // Load num_args.
787 __ TraceSimMsg("GenerateCallClosureFunctionStub");
788 __ lw(T0, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
789 - __ LoadImmediate(TMP1, Smi::RawValue(1));
789 + __ LoadImmediate(TMP, Smi::RawValue(1));
790 - __ subu(T0, T0, TMP1);
790 + __ subu(T0, T0, TMP);
791
792 // Load closure object in T1.
793 __ sll(T1, T0, 1); // T0 (num_args - 1) is a Smi.
794 __ addu(T1, SP, T1);
795 __ lw(T1, Address(T1));
796
797 // Verify that T1 is a closure by checking its class.
798 Label not_closure;
799
800 __ LoadImmediate(T7, reinterpret_cast<intptr_t>(Object::null()));
801
802 // See if it is not a closure, but the null object.
803 __ beq(T1, T7, &not_closure);
804
805 - __ andi(CMPRES, T1, Immediate(kSmiTagMask));
805 + __ andi(CMPRES1, T1, Immediate(kSmiTagMask));
806 - __ beq(CMPRES, ZR, &not_closure); // Not a closure, but a smi.
806 + __ beq(CMPRES1, ZR, &not_closure); // Not a closure, but a smi.
807
808 // Verify that the class of the object is a closure class by checking that
809 // class.signature_function() is not null.
810 __ LoadClass(T0, T1);
811 __ lw(T0, FieldAddress(T0, Class::signature_function_offset()));
812
813 // See if the actual class is not a closure class.
814 __ beq(T0, T7, &not_closure);
815
816 // T0 is just the signature function. Load the actual closure function.
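The check at lines 805-806 leans on Dart's pointer tagging: with kSmiTagMask == 1, the low bit of a word is 0 for a Smi and 1 for a heap-object pointer. A sketch with hypothetical helper names:

    #include <cstdint>

    bool RawIsSmi(uintptr_t raw) { return (raw & 1) == 0; }         // tag bit clear
    bool RawIsHeapObject(uintptr_t raw) { return (raw & 1) != 0; }  // tag bit set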
(...skipping 247 matching lines...)
1064 // T2: object size.
1065 __ LoadImmediate(T5, heap->TopAddress());
1066 __ lw(V0, Address(T5, 0));
1067 __ addu(T3, T2, V0);
1068
1069 // Check if the allocation fits into the remaining space.
1070 // V0: potential new object.
1071 // T1: number of context variables.
1072 // T2: object size.
1073 // T3: potential next object start.
1074 - __ LoadImmediate(TMP1, heap->EndAddress());
1074 + __ LoadImmediate(TMP, heap->EndAddress());
1075 - __ lw(CMPRES1, Address(TMP1, 0));
1075 + __ lw(CMPRES1, Address(TMP, 0));
1076 if (FLAG_use_slow_path) {
1077 __ b(&slow_case);
1078 } else {
1079 __ BranchUnsignedGreaterEqual(T3, CMPRES1, &slow_case);
1080 }
1081
1082 // Successfully allocated the object, now update top to point to
1083 // next object start and initialize the object.
1084 // V0: new object.
1085 // T1: number of context variables.
1086 // T2: object size.
1087 // T3: next object start.
1088 __ sw(T3, Address(T5, 0));
1089 __ addiu(V0, V0, Immediate(kHeapObjectTag));
1090
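The chunk above is the usual new-space bump allocation. A sketch under assumed helper names (the stub keeps top in V0, the next-object start in T3, the top address in T5, and kHeapObjectTag == 1):

    #include <cstdint>
    typedef uintptr_t uword;
    static const uword kHeapObjectTag = 1;

    // Returns 0 when the fast path fails and the slow path must allocate.
    uword TryBumpAllocate(uword* top_addr, const uword* end_addr, uword size) {
      uword top = *top_addr;            // lw V0, [T5]
      uword next = top + size;          // addu T3, T2, V0
      if (next >= *end_addr) return 0;  // BranchUnsignedGreaterEqual -> slow
      *top_addr = next;                 // sw T3, [T5]: publish the new top
      return top + kHeapObjectTag;      // addiu V0, V0, kHeapObjectTag
    }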
1091 // Calculate the size tag.
1092 // V0: new object.
1093 // T1: number of context variables.
1094 // T2: object size.
1095 const intptr_t shift = RawObject::kSizeTagBit - kObjectAlignmentLog2;
1096 - __ LoadImmediate(TMP1, RawObject::SizeTag::kMaxSizeTag);
1096 + __ LoadImmediate(TMP, RawObject::SizeTag::kMaxSizeTag);
1097 - __ sltu(CMPRES, TMP1, T2); // CMPRES = T2 > TMP1 ? 1 : 0.
1097 + __ sltu(CMPRES1, TMP, T2); // CMPRES1 = T2 > TMP ? 1 : 0.
1098 - __ movn(T2, ZR, CMPRES); // T2 = CMPRES != 0 ? 0 : T2.
1098 + __ movn(T2, ZR, CMPRES1); // T2 = CMPRES1 != 0 ? 0 : T2.
1099 - __ sll(TMP1, T2, shift); // TMP1 = T2 << shift.
1099 + __ sll(TMP, T2, shift); // TMP = T2 << shift.
1100 - __ movz(T2, TMP1, CMPRES); // T2 = CMPRES == 0 ? TMP1 : T2.
1100 + __ movz(T2, TMP, CMPRES1); // T2 = CMPRES1 == 0 ? TMP : T2.
1101
1102 // Get the class index and insert it into the tags.
1103 // T2: size and bit tags.
1104 - __ LoadImmediate(TMP1, RawObject::ClassIdTag::encode(context_class.id()));
1104 + __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(context_class.id()));
1105 - __ or_(T2, T2, TMP1);
1105 + __ or_(T2, T2, TMP);
1106 __ sw(T2, FieldAddress(V0, Context::tags_offset()));
1107
1108 // Set up number of context variables field.
1109 // V0: new object.
1110 // T1: number of context variables as integer value (not object).
1111 __ sw(T1, FieldAddress(V0, Context::num_variables_offset()));
1112
1113 // Set up isolate field.
1114 // Load Isolate pointer from Context structure into R2.
1115 // V0: new object.
(...skipping 58 matching lines...)
1174 __ sw(T3, Address(SP, 2 * kWordSize));
1175 __ sw(T2, Address(SP, 1 * kWordSize));
1176 __ sw(T1, Address(SP, 0 * kWordSize));
1177
1178 Label add_to_buffer;
1179 // Check whether this object has already been remembered. Skip adding to the
1180 // store buffer if the object is in the store buffer already.
1181 // Spilled: T1, T2, T3.
1182 // T0: Address being stored.
1183 __ lw(T2, FieldAddress(T0, Object::tags_offset()));
1184 - __ andi(CMPRES, T2, Immediate(1 << RawObject::kRememberedBit));
1184 + __ andi(CMPRES1, T2, Immediate(1 << RawObject::kRememberedBit));
1185 - __ beq(CMPRES, ZR, &add_to_buffer);
1185 + __ beq(CMPRES1, ZR, &add_to_buffer);
1186 __ lw(T1, Address(SP, 0 * kWordSize));
1187 __ lw(T2, Address(SP, 1 * kWordSize));
1188 __ lw(T3, Address(SP, 2 * kWordSize));
1189 __ addiu(SP, SP, Immediate(3 * kWordSize));
1190 __ Ret();
1191
1192 __ Bind(&add_to_buffer);
1193 __ ori(T2, T2, Immediate(1 << RawObject::kRememberedBit));
1194 __ sw(T2, FieldAddress(T0, Object::tags_offset()));
1195
(...skipping 10 matching lines...)
1206 __ sll(T3, T2, 2);
1207 __ addu(T3, T1, T3);
1208 __ sw(T0, Address(T3, StoreBufferBlock::pointers_offset()));
1209
1210 // Increment top_ and check for overflow.
1211 // T2: top_
1212 // T1: StoreBufferBlock
1213 Label L;
1214 __ addiu(T2, T2, Immediate(1));
1215 __ sw(T2, Address(T1, StoreBufferBlock::top_offset()));
1216 - __ addiu(CMPRES, T2, Immediate(-StoreBufferBlock::kSize));
1216 + __ addiu(CMPRES1, T2, Immediate(-StoreBufferBlock::kSize));
1217 // Restore values.
1218 __ lw(T1, Address(SP, 0 * kWordSize));
1219 __ lw(T2, Address(SP, 1 * kWordSize));
1220 __ lw(T3, Address(SP, 2 * kWordSize));
1221 - __ beq(CMPRES, ZR, &L);
1221 + __ beq(CMPRES1, ZR, &L);
1222 __ delay_slot()->addiu(SP, SP, Immediate(3 * kWordSize));
1223 __ Ret();
1224
1225 // Handle overflow: Call the runtime leaf function.
1226 __ Bind(&L);
1227 // Set up frame, push callee-saved registers.
1228
1229 __ EnterCallRuntimeFrame(1 * kWordSize);
1230 __ lw(A0, FieldAddress(CTX, Context::isolate_offset()));
1231 __ CallRuntime(kStoreBufferBlockProcessRuntimeEntry, 1);
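At C level, the add_to_buffer path does roughly this (field and constant names assumed to mirror StoreBufferBlock; a full block is handed to the runtime, which is the kStoreBufferBlockProcessRuntimeEntry call above):

    #include <cstdint>

    static const intptr_t kStoreBufferBlockSize = 1024;  // illustrative capacity

    struct StoreBufferBlock {
      intptr_t top_;
      uintptr_t pointers_[kStoreBufferBlockSize];
    };

    void StoreBufferAdd(StoreBufferBlock* block, uintptr_t addr,
                        void (*process)(StoreBufferBlock*)) {
      block->pointers_[block->top_] = addr;       // sw T0, [T3 + pointers_offset]
      block->top_ += 1;                           // addiu T2, T2, 1
      if (block->top_ == kStoreBufferBlockSize) { // addiu CMPRES1, T2, -kSize; beq
        process(block);                           // overflow: runtime leaf call
      }
    }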
(...skipping 41 matching lines...)
1273 &no_instantiator);
1274 __ delay_slot()->mov(T4, T3);
1275 __ AddImmediate(T3, type_args_size);
1276 __ Bind(&no_instantiator);
1277 // T4: potential new object end and, if T4 != T3, potential new
1278 // InstantiatedTypeArguments object start.
1279 }
1280 // Check if the allocation fits into the remaining space.
1281 // T2: potential new object start.
1282 // T3: potential next object start.
1283 - __ LoadImmediate(TMP1, heap->EndAddress());
1283 + __ LoadImmediate(TMP, heap->EndAddress());
1284 - __ lw(CMPRES1, Address(TMP1));
1284 + __ lw(CMPRES1, Address(TMP));
1285 if (FLAG_use_slow_path) {
1286 __ b(&slow_case);
1287 } else {
1288 __ BranchUnsignedGreaterEqual(T3, CMPRES1, &slow_case);
1289 }
1290
1291 // Successfully allocated the object(s), now update top to point to
1292 // next object start and initialize the object.
1293 __ sw(T3, Address(T5));
1294
(...skipping 81 matching lines...)
1376
1377 __ Bind(&slow_case);
1378 }
1379 if (is_cls_parameterized) {
1380 __ lw(T1, Address(SP, 1 * kWordSize));
1381 __ lw(T0, Address(SP, 0 * kWordSize));
1382 }
1383 // Create a stub frame as we are pushing some objects on the stack before
1384 // calling into the runtime.
1385 __ EnterStubFrame(true); // Uses pool pointer to pass cls to runtime.
1386 - __ LoadObject(TMP1, cls);
1386 + __ LoadObject(TMP, cls);
1387
1388 __ addiu(SP, SP, Immediate(-4 * kWordSize));
1389 // Space on stack for return value.
1390 __ LoadImmediate(T7, reinterpret_cast<intptr_t>(Object::null()));
1391 __ sw(T7, Address(SP, 3 * kWordSize));
1392 - __ sw(TMP1, Address(SP, 2 * kWordSize)); // Class of object to be allocated.
1392 + __ sw(TMP, Address(SP, 2 * kWordSize)); // Class of object to be allocated.
1393
1394 if (is_cls_parameterized) {
1395 // Push type arguments of object to be allocated and of instantiator.
1396 __ sw(T1, Address(SP, 1 * kWordSize));
1397 __ sw(T0, Address(SP, 0 * kWordSize));
1398 } else {
1399 // Push null type arguments and kNoInstantiator.
1400 __ LoadImmediate(T1, Smi::RawValue(StubCode::kNoInstantiator));
1401 __ sw(T7, Address(SP, 1 * kWordSize));
1402 __ sw(T1, Address(SP, 0 * kWordSize));
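For reference, the four slots set up at lines 1388-1402, top slot first:

    SP + 3*kWordSize : null      (space for the return value)
    SP + 2*kWordSize : cls       (class of the object to be allocated)
    SP + 1*kWordSize : type arguments, or null when not parameterized
    SP + 0*kWordSize : instantiator type args, or Smi(kNoInstantiator)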
(...skipping 37 matching lines...)
1440 __ lw(T2, Address(T5));
1441 __ AddImmediate(T3, T2, closure_size);
1442 if (is_implicit_instance_closure) {
1443 __ mov(T4, T3); // T4: new context address.
1444 __ AddImmediate(T3, context_size);
1445 }
1446 // Check if the allocation fits into the remaining space.
1447 // T2: potential new closure object.
1448 // T3: address of top of heap.
1449 // T4: potential new context object (only if is_implicit_closure).
1450 - __ LoadImmediate(TMP1, heap->EndAddress());
1450 + __ LoadImmediate(TMP, heap->EndAddress());
1451 - __ lw(CMPRES1, Address(TMP1));
1451 + __ lw(CMPRES1, Address(TMP));
1452 if (FLAG_use_slow_path) {
1453 __ b(&slow_case);
1454 } else {
1455 __ BranchUnsignedGreaterEqual(T3, CMPRES1, &slow_case);
1456 }
1457
1458 // Successfully allocated the object, now update top to point to
1459 // next object start and initialize the object.
1460 __ sw(T3, Address(T5));
1461
(...skipping 58 matching lines...)
1520
1521 __ Bind(&slow_case);
1522 }
1523
1524 // If it's an implicit instance closure we need 4 stack slots, o/w only 3.
1525 intptr_t num_slots = is_implicit_instance_closure ? 4 : 3;
1526 __ addiu(SP, SP, Immediate(-num_slots * kWordSize));
1527 // Set up space on stack for return value.
1528 __ LoadImmediate(T7, reinterpret_cast<intptr_t>(Object::null()));
1529 __ sw(T7, Address(SP, (num_slots - 1) * kWordSize));
1530 - __ LoadObject(TMP1, func);
1530 + __ LoadObject(TMP, func);
1531 - __ sw(TMP1, Address(SP, (num_slots - 2) * kWordSize));
1531 + __ sw(TMP, Address(SP, (num_slots - 2) * kWordSize));
1532 __ mov(T2, T7);
1533 if (is_implicit_instance_closure) {
1534 __ lw(T1, Address(FP, kReceiverFPOffset));
1535 __ sw(T1, Address(SP, (num_slots - 3) * kWordSize)); // Receiver.
1536 }
1537 if (has_type_arguments) {
1538 __ lw(T2, Address(FP, kTypeArgumentsFPOffset));
1539 }
1540 __ sw(T2, Address(SP, 0 * kWordSize));
1541
(...skipping 144 matching lines...)
1686 Label loop, update, test, found, get_class_id_as_smi;
1687 // S5: IC data object (preserved).
1688 __ lw(T0, FieldAddress(S5, ICData::ic_data_offset()));
1689 // T0: ic_data_array with check entries: classes and target functions.
1690 __ AddImmediate(T0, Array::data_offset() - kHeapObjectTag);
1691 // T0: points directly to the first ic data array element.
1692
1693 // Get the receiver's class ID (first read number of arguments from
1694 // arguments descriptor array and then access the receiver from the stack).
1695 __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
1696 - __ LoadImmediate(TMP1, Smi::RawValue(1));
1696 + __ LoadImmediate(TMP, Smi::RawValue(1));
1697 - __ subu(T1, T1, TMP1);
1697 + __ subu(T1, T1, TMP);
1698 __ sll(T3, T1, 1); // T1 (argument_count - 1) is smi.
1699 __ addu(T3, T3, SP);
1700 __ bal(&get_class_id_as_smi);
1701 __ delay_slot()->lw(T3, Address(T3));
1702 // T1: argument_count - 1 (smi).
1703 // T3: receiver's class ID (smi).
1704 __ b(&test);
1705 __ delay_slot()->lw(T4, Address(T0)); // First class id (smi) to check.
1706
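A reading aid for the two branch pairs above: on MIPS the instruction issued through __ delay_slot()->... sits in the branch delay slot and executes before control transfers, so both loads happen whether or not their branch is taken:

    __ bal(&get_class_id_as_smi);
    __ delay_slot()->lw(T3, Address(T3));  // executes in the delay slot
    __ b(&test);
    __ delay_slot()->lw(T4, Address(T0));  // likewise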
1707 __ Bind(&loop);
(...skipping 53 matching lines...)
1761 // Preserve IC data object and arguments descriptor array and
1762 // set up space on stack for result (target code object).
1763 int num_slots = num_args + 4;
1764 __ addiu(SP, SP, Immediate(-num_slots * kWordSize));
1765 __ sw(S5, Address(SP, (num_slots - 1) * kWordSize));
1766 __ sw(S4, Address(SP, (num_slots - 2) * kWordSize));
1767 __ LoadImmediate(TMP, reinterpret_cast<intptr_t>(Object::null()));
1768 __ sw(TMP, Address(SP, (num_slots - 3) * kWordSize));
1769 // Push call arguments.
1770 for (intptr_t i = 0; i < num_args; i++) {
1771 - __ lw(TMP1, Address(T1, -i * kWordSize));
1771 + __ lw(TMP, Address(T1, -i * kWordSize));
1772 - __ sw(TMP1, Address(SP, (num_slots - i - 4) * kWordSize));
1772 + __ sw(TMP, Address(SP, (num_slots - i - 4) * kWordSize));
1773 }
1774 // Pass IC data object.
1775 __ sw(S5, Address(SP, (num_slots - num_args - 4) * kWordSize));
1776 __ CallRuntime(handle_ic_miss, num_args + 1);
1777 __ TraceSimMsg("NArgsCheckInlineCacheStub return");
1778 // Pop returned code object into T3 (null if not found).
1779 // Restore arguments descriptor array and IC data array.
1780 __ lw(T3, Address(SP, (num_slots - 3) * kWordSize));
1781 __ lw(S4, Address(SP, (num_slots - 2) * kWordSize));
1782 __ lw(S5, Address(SP, (num_slots - 1) * kWordSize));
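Slot layout for the handle_ic_miss call, read off lines 1764-1775 (num_slots == num_args + 4):

    SP + (num_slots-1)*kWordSize : S5   (IC data; restored after the call)
    SP + (num_slots-2)*kWordSize : S4   (arguments descriptor; restored)
    SP + (num_slots-3)*kWordSize : null (result slot: target code object)
    SP + (num_slots-4)*kWordSize
        down to SP + 1*kWordSize : the num_args call arguments
    SP + 0*kWordSize             : S5   (IC data, passed as the last argument)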
(...skipping 505 matching lines...)
2288
2289
2290 DECLARE_LEAF_RUNTIME_ENTRY(intptr_t,
2291 BigintCompare,
2292 RawBigint* left,
2293 RawBigint* right);
2294
2295
2296 // Does identical check (object references are equal or not equal) with special
2297 // checks for boxed numbers.
2298 - // Returns: CMPRES is zero if equal, non-zero otherwise.
2298 + // Returns: CMPRES1 is zero if equal, non-zero otherwise.
2299 // Note: A Mint cannot contain a value that would fit in Smi, a Bigint
2300 // cannot contain a value that fits in Mint or Smi.
2301 void StubCode::GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
2302 const Register left,
2303 const Register right,
2304 const Register temp1,
2305 const Register temp2) {
2306 __ TraceSimMsg("IdenticalWithNumberCheckStub");
2307 __ Comment("IdenticalWithNumberCheckStub");
2308 Label reference_compare, done, check_mint, check_bigint;
2309 // If any of the arguments is Smi do reference compare.
2310 __ andi(temp1, left, Immediate(kSmiTagMask));
2311 __ beq(temp1, ZR, &reference_compare);
2312 __ andi(temp1, right, Immediate(kSmiTagMask));
2313 __ beq(temp1, ZR, &reference_compare);
2314
2315 // Value compare for two doubles.
2316 __ LoadImmediate(temp1, kDoubleCid);
2317 __ LoadClassId(temp2, left);
2318 __ bne(temp1, temp2, &check_mint);
2319 __ LoadClassId(temp2, right);
2320 - __ subu(CMPRES, temp1, temp2);
2320 + __ subu(CMPRES1, temp1, temp2);
2321 - __ bne(CMPRES, ZR, &done);
2321 + __ bne(CMPRES1, ZR, &done);
2322
2323 // Double values bitwise compare.
2324 __ lw(temp1, FieldAddress(left, Double::value_offset() + 0 * kWordSize));
2325 __ lw(temp2, FieldAddress(right, Double::value_offset() + 0 * kWordSize));
2326 - __ subu(CMPRES, temp1, temp2);
2326 + __ subu(CMPRES1, temp1, temp2);
2327 - __ bne(CMPRES, ZR, &done);
2327 + __ bne(CMPRES1, ZR, &done);
2328 __ lw(temp1, FieldAddress(left, Double::value_offset() + 1 * kWordSize));
2329 __ lw(temp2, FieldAddress(right, Double::value_offset() + 1 * kWordSize));
2330 __ b(&done);
2331 - __ delay_slot()->subu(CMPRES, temp1, temp2);
2331 + __ delay_slot()->subu(CMPRES1, temp1, temp2);
2332
2333 __ Bind(&check_mint);
2334 __ LoadImmediate(temp1, kMintCid);
2335 __ LoadClassId(temp2, left);
2336 __ bne(temp1, temp2, &check_bigint);
2337 __ LoadClassId(temp2, right);
2338 - __ subu(CMPRES, temp1, temp2);
2338 + __ subu(CMPRES1, temp1, temp2);
2339 - __ bne(CMPRES, ZR, &done);
2339 + __ bne(CMPRES1, ZR, &done);
2340
2341 __ lw(temp1, FieldAddress(left, Mint::value_offset() + 0 * kWordSize));
2342 __ lw(temp2, FieldAddress(right, Mint::value_offset() + 0 * kWordSize));
2343 - __ subu(CMPRES, temp1, temp2);
2343 + __ subu(CMPRES1, temp1, temp2);
2344 - __ bne(CMPRES, ZR, &done);
2344 + __ bne(CMPRES1, ZR, &done);
2345 __ lw(temp1, FieldAddress(left, Mint::value_offset() + 1 * kWordSize));
2346 __ lw(temp2, FieldAddress(right, Mint::value_offset() + 1 * kWordSize));
2347 __ b(&done);
2348 - __ delay_slot()->subu(CMPRES, temp1, temp2);
2348 + __ delay_slot()->subu(CMPRES1, temp1, temp2);
2349
2350 __ Bind(&check_bigint);
2351 __ LoadImmediate(temp1, kBigintCid);
2352 __ LoadClassId(temp2, left);
2353 __ bne(temp1, temp2, &reference_compare);
2354 __ LoadClassId(temp2, right);
2355 - __ subu(CMPRES, temp1, temp2);
2355 + __ subu(CMPRES1, temp1, temp2);
2356 - __ bne(CMPRES, ZR, &done);
2356 + __ bne(CMPRES1, ZR, &done);
2357
2358 __ EnterStubFrame();
2359 __ ReserveAlignedFrameSpace(2 * kWordSize);
2360 __ sw(left, Address(SP, 1 * kWordSize));
2361 __ sw(right, Address(SP, 0 * kWordSize));
2362 __ mov(A0, left);
2363 __ mov(A1, right);
2364 __ CallRuntime(kBigintCompareRuntimeEntry, 2);
2365 __ TraceSimMsg("IdenticalWithNumberCheckStub return");
2366 // Result in V0, 0 means equal.
2367 __ LeaveStubFrame();
2368 __ b(&done);
2369 - __ delay_slot()->mov(CMPRES, V0);
2369 + __ delay_slot()->mov(CMPRES1, V0);
2370
2371 __ Bind(&reference_compare);
2372 - __ subu(CMPRES, left, right);
2372 + __ subu(CMPRES1, left, right);
2373 __ Bind(&done);
2374 // A branch or test after this comparison will check CMPRES1 == CMPRES2.
2375 __ mov(CMPRES2, ZR);
2376 }
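Roughly what the stub computes, as a C-level sketch with hypothetical helpers (the stub itself reports the result by leaving CMPRES1 == 0 for "identical" and clearing CMPRES2):

    bool IdenticalWithNumberCheck(RawObject* left, RawObject* right) {
      if (IsSmi(left) || IsSmi(right)) return left == right;
      if (GetClassId(left) == kDoubleCid)                 // bitwise, two words
        return GetClassId(right) == kDoubleCid &&
               DoubleBits(left) == DoubleBits(right);
      if (GetClassId(left) == kMintCid)                   // 64-bit value
        return GetClassId(right) == kMintCid &&
               MintValue(left) == MintValue(right);
      if (GetClassId(left) == kBigintCid)                 // runtime call
        return GetClassId(right) == kBigintCid &&
               BigintCompare(left, right) == 0;
      return left == right;                               // reference compare
    }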
2377
2378
2379 // Called only from unoptimized code. All relevant registers have been saved.
2380 // RA: return address.
2381 // SP + 4: left operand.
2382 // SP + 0: right operand.
2383 - // Returns: CMPRES is zero if equal, non-zero otherwise.
2383 + // Returns: CMPRES1 is zero if equal, non-zero otherwise.
2384 void StubCode::GenerateUnoptimizedIdenticalWithNumberCheckStub(
2385 Assembler* assembler) {
2386 // Check single stepping.
2387 Label not_stepping;
2388 __ lw(T0, FieldAddress(CTX, Context::isolate_offset()));
2389 __ lbu(T0, Address(T0, Isolate::single_step_offset()));
2390 __ BranchEqual(T0, 0, &not_stepping);
2391 // Call single step callback in debugger.
2392 __ addiu(SP, SP, Immediate(-1 * kWordSize));
2393 __ sw(RA, Address(SP, 0 * kWordSize)); // Return address.
(...skipping 11 matching lines...)
2405 __ lw(right, Address(SP, 0 * kWordSize));
2406 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp1, temp2);
2407 __ Ret();
2408 }
2409
2410
2411 // Called from optimized code only. Must preserve any registers that are
2412 // destroyed.
2413 // SP + 4: left operand.
2414 // SP + 0: right operand.
2415 - // Returns: CMPRES is zero if equal, non-zero otherwise.
2415 + // Returns: CMPRES1 is zero if equal, non-zero otherwise.
2416 void StubCode::GenerateOptimizedIdenticalWithNumberCheckStub(
2417 Assembler* assembler) {
2418 const Register temp1 = T2;
2419 const Register temp2 = T3;
2420 const Register left = T1;
2421 const Register right = T0;
2422 // Preserve left, right.
2423 __ addiu(SP, SP, Immediate(-4 * kWordSize));
2424 __ sw(temp1, Address(SP, 3 * kWordSize));
2425 __ sw(temp2, Address(SP, 2 * kWordSize));
2426 __ sw(left, Address(SP, 1 * kWordSize));
2427 __ sw(right, Address(SP, 0 * kWordSize));
2428 __ lw(left, Address(SP, 5 * kWordSize));
2429 __ lw(right, Address(SP, 4 * kWordSize));
2430 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp1, temp2);
2431 __ lw(right, Address(SP, 0 * kWordSize));
2432 __ lw(left, Address(SP, 1 * kWordSize));
2433 __ lw(temp2, Address(SP, 2 * kWordSize));
2434 __ lw(temp1, Address(SP, 3 * kWordSize));
2435 __ Ret();
2436 __ delay_slot()->addiu(SP, SP, Immediate(4 * kWordSize));
2437 }
2438
2439 } // namespace dart
2440
2441 #endif // defined TARGET_ARCH_MIPS
