Chromium Code Reviews

Side by Side Diff: runtime/vm/flow_graph_compiler_mips.cc

Issue 17131002: Enables language tests for SIMMIPS. (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 7 years, 6 months ago
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_MIPS. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_MIPS.
6 #if defined(TARGET_ARCH_MIPS) 6 #if defined(TARGET_ARCH_MIPS)
7 7
8 #include "vm/flow_graph_compiler.h" 8 #include "vm/flow_graph_compiler.h"
9 9
10 #include "lib/error.h" 10 #include "lib/error.h"
(...skipping 148 matching lines...)
159 159
160 #define __ assembler()-> 160 #define __ assembler()->
161 161
162 162
163 // Fall through if bool_register contains null. 163 // Fall through if bool_register contains null.
164 void FlowGraphCompiler::GenerateBoolToJump(Register bool_register, 164 void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
165 Label* is_true, 165 Label* is_true,
166 Label* is_false) { 166 Label* is_false) {
167 __ TraceSimMsg("BoolToJump"); 167 __ TraceSimMsg("BoolToJump");
168 Label fall_through; 168 Label fall_through;
169 __ beq(bool_register, NULLREG, &fall_through); 169 __ BranchEqual(bool_register, reinterpret_cast<int32_t>(Object::null()),
170 &fall_through);
170 __ BranchEqual(bool_register, Bool::True(), is_true); 171 __ BranchEqual(bool_register, Bool::True(), is_true);
171 __ b(is_false); 172 __ b(is_false);
172 __ Bind(&fall_through); 173 __ Bind(&fall_through);
173 } 174 }
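As a reading aid: the control flow emitted by GenerateBoolToJump corresponds to the C++ sketch below. The enum and function are illustrative stand-ins, not VM code.

    // Stand-in for the three values bool_register may hold.
    enum class BoolOrNull { kNull, kTrue, kFalse };

    // Null falls through; true and false branch away, as in the emitted code.
    void BoolToJump(BoolOrNull value, void (*is_true)(), void (*is_false)()) {
      if (value == BoolOrNull::kNull) return;  // fall_through
      if (value == BoolOrNull::kTrue) {        // BranchEqual(..., Bool::True(), ...)
        is_true();
        return;
      }
      is_false();                              // unconditional b(is_false)
    }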
174 175
175 176
176 // A0: instance (must be preserved). 177 // A0: instance (must be preserved).
177 // A1: instantiator type arguments (if used). 178 // A1: instantiator type arguments (if used).
178 RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub( 179 RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
179 TypeTestStubKind test_kind, 180 TypeTestStubKind test_kind,
180 Register instance_reg, 181 Register instance_reg,
181 Register type_arguments_reg, 182 Register type_arguments_reg,
182 Register temp_reg, 183 Register temp_reg,
183 Label* is_instance_lbl, 184 Label* is_instance_lbl,
184 Label* is_not_instance_lbl) { 185 Label* is_not_instance_lbl) {
185 __ TraceSimMsg("CallSubtypeTestStub"); 186 __ TraceSimMsg("CallSubtypeTestStub");
186 ASSERT(instance_reg == A0); 187 ASSERT(instance_reg == A0);
187 ASSERT(temp_reg == kNoRegister); // Unused on MIPS. 188 ASSERT(temp_reg == kNoRegister); // Unused on MIPS.
188 const SubtypeTestCache& type_test_cache = 189 const SubtypeTestCache& type_test_cache =
189 SubtypeTestCache::ZoneHandle(SubtypeTestCache::New()); 190 SubtypeTestCache::ZoneHandle(SubtypeTestCache::New());
190 __ LoadObject(A2, type_test_cache); 191 __ LoadObject(A2, type_test_cache);
191 if (test_kind == kTestTypeOneArg) { 192 if (test_kind == kTestTypeOneArg) {
192 ASSERT(type_arguments_reg == kNoRegister); 193 ASSERT(type_arguments_reg == kNoRegister);
194 __ LoadImmediate(A1, reinterpret_cast<int32_t>(Object::null()));
193 __ BranchLink(&StubCode::Subtype1TestCacheLabel()); 195 __ BranchLink(&StubCode::Subtype1TestCacheLabel());
194 __ delay_slot()->mov(A1, NULLREG);
195 } else if (test_kind == kTestTypeTwoArgs) { 196 } else if (test_kind == kTestTypeTwoArgs) {
196 ASSERT(type_arguments_reg == kNoRegister); 197 ASSERT(type_arguments_reg == kNoRegister);
198 __ LoadImmediate(A1, reinterpret_cast<int32_t>(Object::null()));
197 __ BranchLink(&StubCode::Subtype2TestCacheLabel()); 199 __ BranchLink(&StubCode::Subtype2TestCacheLabel());
198 __ delay_slot()->mov(A1, NULLREG);
199 } else if (test_kind == kTestTypeThreeArgs) { 200 } else if (test_kind == kTestTypeThreeArgs) {
200 ASSERT(type_arguments_reg == A1); 201 ASSERT(type_arguments_reg == A1);
201 __ BranchLink(&StubCode::Subtype3TestCacheLabel()); 202 __ BranchLink(&StubCode::Subtype3TestCacheLabel());
202 } else { 203 } else {
203 UNREACHABLE(); 204 UNREACHABLE();
204 } 205 }
205 // Result is in V0: null -> not found, otherwise Bool::True or Bool::False. 206 // Result is in V0: null -> not found, otherwise Bool::True or Bool::False.
206 GenerateBoolToJump(V0, is_instance_lbl, is_not_instance_lbl); 207 GenerateBoolToJump(V0, is_instance_lbl, is_not_instance_lbl);
207 return type_test_cache.raw(); 208 return type_test_cache.raw();
208 } 209 }
(...skipping 113 matching lines...)
322 // Bool interface can be implemented only by core class Bool. 323 // Bool interface can be implemented only by core class Bool.
323 if (type.IsBoolType()) { 324 if (type.IsBoolType()) {
324 __ BranchEqual(kClassIdReg, kBoolCid, is_instance_lbl); 325 __ BranchEqual(kClassIdReg, kBoolCid, is_instance_lbl);
325 __ b(is_not_instance_lbl); 326 __ b(is_not_instance_lbl);
326 return false; 327 return false;
327 } 328 }
328 if (type.IsFunctionType()) { 329 if (type.IsFunctionType()) {
329 // Check if instance is a closure. 330 // Check if instance is a closure.
330 __ LoadClassById(T1, kClassIdReg); 331 __ LoadClassById(T1, kClassIdReg);
331 __ lw(T1, FieldAddress(T1, Class::signature_function_offset())); 332 __ lw(T1, FieldAddress(T1, Class::signature_function_offset()));
332 __ bne(T1, NULLREG, is_instance_lbl); 333 __ BranchNotEqual(T1, reinterpret_cast<int32_t>(Object::null()),
334 is_instance_lbl);
333 } 335 }
334 // Custom checking for numbers (Smi, Mint, Bigint and Double). 336 // Custom checking for numbers (Smi, Mint, Bigint and Double).
335 // Note that instance is not Smi (checked above). 337 // Note that instance is not Smi (checked above).
336 if (type.IsSubtypeOf(Type::Handle(Type::Number()), NULL)) { 338 if (type.IsSubtypeOf(Type::Handle(Type::Number()), NULL)) {
337 GenerateNumberTypeCheck( 339 GenerateNumberTypeCheck(
338 kClassIdReg, type, is_instance_lbl, is_not_instance_lbl); 340 kClassIdReg, type, is_instance_lbl, is_not_instance_lbl);
339 return false; 341 return false;
340 } 342 }
341 if (type.IsStringType()) { 343 if (type.IsStringType()) {
342 GenerateStringTypeCheck(kClassIdReg, is_instance_lbl, is_not_instance_lbl); 344 GenerateStringTypeCheck(kClassIdReg, is_instance_lbl, is_not_instance_lbl);
(...skipping 47 matching lines...)
390 __ TraceSimMsg("UninstantiatedTypeTest"); 392 __ TraceSimMsg("UninstantiatedTypeTest");
391 __ Comment("UninstantiatedTypeTest"); 393 __ Comment("UninstantiatedTypeTest");
392 ASSERT(!type.IsInstantiated()); 394 ASSERT(!type.IsInstantiated());
393 // Skip check if destination is a dynamic type. 395 // Skip check if destination is a dynamic type.
394 if (type.IsTypeParameter()) { 396 if (type.IsTypeParameter()) {
395 const TypeParameter& type_param = TypeParameter::Cast(type); 397 const TypeParameter& type_param = TypeParameter::Cast(type);
396 // Load instantiator (or null) and instantiator type arguments on stack. 398 // Load instantiator (or null) and instantiator type arguments on stack.
397 __ lw(A1, Address(SP, 0)); // Get instantiator type arguments. 399 __ lw(A1, Address(SP, 0)); // Get instantiator type arguments.
398 // A1: instantiator type arguments. 400 // A1: instantiator type arguments.
399 // Check if type argument is dynamic. 401 // Check if type argument is dynamic.
400 __ beq(A1, NULLREG, is_instance_lbl); 402 __ LoadImmediate(T7, reinterpret_cast<int32_t>(Object::null()));
403 __ beq(A1, T7, is_instance_lbl);
401 // Can handle only type arguments that are instances of TypeArguments. 404 // Can handle only type arguments that are instances of TypeArguments.
402 // (runtime checks canonicalize type arguments). 405 // (runtime checks canonicalize type arguments).
403 Label fall_through; 406 Label fall_through;
404 __ LoadClassId(T2, A1); 407 __ LoadClassId(T2, A1);
405 __ BranchNotEqual(T2, kTypeArgumentsCid, &fall_through); 408 __ BranchNotEqual(T2, kTypeArgumentsCid, &fall_through);
406 __ lw(T2, 409 __ lw(T2,
407 FieldAddress(A1, TypeArguments::type_at_offset(type_param.index()))); 410 FieldAddress(A1, TypeArguments::type_at_offset(type_param.index())));
408 // T2: concrete type of type. 411 // T2: concrete type of type.
409 // Check if type argument is dynamic. 412 // Check if type argument is dynamic.
410 __ BranchEqual(T2, Type::ZoneHandle(Type::DynamicType()), is_instance_lbl); 413 __ BranchEqual(T2, Type::ZoneHandle(Type::DynamicType()), is_instance_lbl);
411 __ beq(T2, NULLREG, is_instance_lbl); 414 __ beq(T2, T7, is_instance_lbl);
412 const Type& object_type = Type::ZoneHandle(Type::ObjectType()); 415 const Type& object_type = Type::ZoneHandle(Type::ObjectType());
413 __ BranchEqual(T2, object_type, is_instance_lbl); 416 __ BranchEqual(T2, object_type, is_instance_lbl);
414 417
415 // For Smi check quickly against int and num interfaces. 418 // For Smi check quickly against int and num interfaces.
416 Label not_smi; 419 Label not_smi;
417 __ andi(CMPRES, A0, Immediate(kSmiTagMask)); 420 __ andi(CMPRES, A0, Immediate(kSmiTagMask));
418 __ bne(CMPRES, ZR, &not_smi); // Value is Smi? 421 __ bne(CMPRES, ZR, &not_smi); // Value is Smi?
419 __ BranchEqual(T2, Type::ZoneHandle(Type::IntType()), is_instance_lbl); 422 __ BranchEqual(T2, Type::ZoneHandle(Type::IntType()), is_instance_lbl);
420 __ BranchEqual(T2, Type::ZoneHandle(Type::Number()), is_instance_lbl); 423 __ BranchEqual(T2, Type::ZoneHandle(Type::Number()), is_instance_lbl);
421 424
(...skipping 54 matching lines...)
476 __ TraceSimMsg("InlineInstanceof"); 479 __ TraceSimMsg("InlineInstanceof");
477 __ Comment("InlineInstanceof"); 480 __ Comment("InlineInstanceof");
478 if (type.IsVoidType()) { 481 if (type.IsVoidType()) {
479 // A non-null value is returned from a void function, which will result in a 482 // A non-null value is returned from a void function, which will result in a
480 // type error. A null value is handled prior to executing this inline code. 483 // type error. A null value is handled prior to executing this inline code.
481 return SubtypeTestCache::null(); 484 return SubtypeTestCache::null();
482 } 485 }
483 if (TypeCheckAsClassEquality(type)) { 486 if (TypeCheckAsClassEquality(type)) {
484 const intptr_t type_cid = Class::Handle(type.type_class()).id(); 487 const intptr_t type_cid = Class::Handle(type.type_class()).id();
485 const Register kInstanceReg = A0; 488 const Register kInstanceReg = A0;
486 __ andi(T0, kInstanceReg, Immediate(kSmiTagMask)); 489 __ andi(CMPRES, kInstanceReg, Immediate(kSmiTagMask));
487 if (type_cid == kSmiCid) { 490 if (type_cid == kSmiCid) {
488 __ beq(T0, ZR, is_instance_lbl); 491 __ beq(CMPRES, ZR, is_instance_lbl);
489 } else { 492 } else {
490 __ beq(T0, ZR, is_not_instance_lbl); 493 __ beq(CMPRES, ZR, is_not_instance_lbl);
491 __ LoadClassId(T0, kInstanceReg); 494 __ LoadClassId(T0, kInstanceReg);
492 __ BranchEqual(T0, type_cid, is_instance_lbl); 495 __ BranchEqual(T0, type_cid, is_instance_lbl);
493 } 496 }
494 __ b(is_not_instance_lbl); 497 __ b(is_not_instance_lbl);
495 return SubtypeTestCache::null(); 498 return SubtypeTestCache::null();
496 } 499 }
497 if (type.IsInstantiated()) { 500 if (type.IsInstantiated()) {
498 const Class& type_class = Class::ZoneHandle(type.type_class()); 501 const Class& type_class = Class::ZoneHandle(type.type_class());
499 // A Smi object cannot be the instance of a parameterized class. 502 // A Smi object cannot be the instance of a parameterized class.
500 // A class equality check is only applicable with a dst type of a 503 // A class equality check is only applicable with a dst type of a
(...skipping 19 matching lines...)
520 return SubtypeTestCache::null(); 523 return SubtypeTestCache::null();
521 } 524 }
522 } 525 }
523 return GenerateUninstantiatedTypeTest(token_pos, 526 return GenerateUninstantiatedTypeTest(token_pos,
524 type, 527 type,
525 is_instance_lbl, 528 is_instance_lbl,
526 is_not_instance_lbl); 529 is_not_instance_lbl);
527 } 530 }
528 531
529 532
533 // If the instanceof type test cannot be performed successfully at compile
534 // time (and thereby eliminated), optimize it by adding inlined tests for:
535 // - NULL -> return false.
536 // - Smi -> compile time subtype check (only if dst class is not parameterized).
537 // - Class equality (only if class is not parameterized).
538 // Inputs:
539 // - A0: object.
540 // - A1: instantiator type arguments or raw_null.
541 // - A2: instantiator or raw_null.
542 // Returns:
543 // - true or false in V0.
530 void FlowGraphCompiler::GenerateInstanceOf(intptr_t token_pos, 544 void FlowGraphCompiler::GenerateInstanceOf(intptr_t token_pos,
531 intptr_t deopt_id, 545 intptr_t deopt_id,
532 const AbstractType& type, 546 const AbstractType& type,
533 bool negate_result, 547 bool negate_result,
534 LocationSummary* locs) { 548 LocationSummary* locs) {
535 UNIMPLEMENTED(); 549 ASSERT(type.IsFinalized() && !type.IsMalformed());
550
551 // Preserve instantiator (A2) and its type arguments (A1).
552 __ addiu(SP, SP, Immediate(-2 * kWordSize));
553 __ sw(A2, Address(SP, 1 * kWordSize));
554 __ sw(A1, Address(SP, 0 * kWordSize));
555
556 Label is_instance, is_not_instance;
557 // If type is instantiated and non-parameterized, we can inline code
558 // checking whether the tested instance is a Smi.
559 if (type.IsInstantiated()) {
560 // A null object is only an instance of Object and dynamic, which has
561 // already been checked above (if the type is instantiated). So we can
562 // return false here if the instance is null (and if the type is
563 // instantiated).
564 // We can only inline this null check if the type is instantiated at compile
565 // time, since an uninstantiated type at compile time could be Object or
566 // dynamic at run time.
567 __ BranchEqual(A0, reinterpret_cast<int32_t>(Object::null()),
568 &is_not_instance);
569 }
570
571 // Generate inline instanceof test.
572 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle();
573 test_cache = GenerateInlineInstanceof(token_pos, type,
574 &is_instance, &is_not_instance);
575
576 // test_cache is null if there is no fall-through.
577 Label done;
578 if (!test_cache.IsNull()) {
579 // Generate runtime call.
580 // Load instantiator (A2) and its type arguments (A1).
581 __ lw(A1, Address(SP, 0 * kWordSize));
582 __ lw(A2, Address(SP, 1 * kWordSize));
583
584 __ addiu(SP, SP, Immediate(-6 * kWordSize));
585 __ LoadObject(TMP, Object::ZoneHandle());
586 __ sw(TMP, Address(SP, 5 * kWordSize)); // Make room for the result.
587 __ sw(A0, Address(SP, 4 * kWordSize)); // Push the instance.
588 __ LoadObject(TMP, type);
589 __ sw(TMP, Address(SP, 3 * kWordSize)); // Push the type.
590 __ sw(A2, Address(SP, 2 * kWordSize)); // Push instantiator.
591 __ sw(A1, Address(SP, 1 * kWordSize)); // Push type arguments.
592 __ LoadObject(A0, test_cache);
593 __ sw(A0, Address(SP, 0 * kWordSize));
594 GenerateCallRuntime(token_pos, deopt_id, kInstanceofRuntimeEntry, locs);
595 // Pop the parameters supplied to the runtime entry. The result of the
596 // instanceof runtime call will be left as the result of the operation.
597 __ lw(T0, Address(SP, 5 * kWordSize));
598 __ addiu(SP, SP, Immediate(6 * kWordSize));
599 if (negate_result) {
600 __ LoadObject(V0, Bool::True());
601 __ bne(T0, V0, &done);
602 __ LoadObject(V0, Bool::False());
603 } else {
604 __ mov(V0, T0);
605 }
606 __ b(&done);
607 }
608 __ Bind(&is_not_instance);
609 __ LoadObject(V0, negate_result ? Bool::True() : Bool::False());
610 __ b(&done);
611
612 __ Bind(&is_instance);
613 __ LoadObject(V0, negate_result ? Bool::False() : Bool::True());
614 __ Bind(&done);
615 // Remove instantiator (A2) and its type arguments (A1).
616 __ Drop(2);
536 } 617 }
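For reference, the six words reserved for the kInstanceofRuntimeEntry call lay out as below; the offsets are inferred from the stores above, and the enum is only a reading aid, not VM source.

    // Word offsets from SP after the 6-word reservation.
    enum InstanceofCallSlot {
      kCacheSlot         = 0,  // SubtypeTestCache
      kTypeArgumentsSlot = 1,  // instantiator type arguments (from A1)
      kInstantiatorSlot  = 2,  // instantiator (from A2)
      kTypeSlot          = 3,  // the tested type
      kInstanceSlot      = 4,  // the instance (from A0)
      kResultSlot        = 5   // null on entry; Bool result read back into T0
    };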
537 618
538 619
539 // Optimize assignable type check by adding inlined tests for: 620 // Optimize assignable type check by adding inlined tests for:
540 // - NULL -> return NULL. 621 // - NULL -> return NULL.
541 // - Smi -> compile time subtype check (only if dst class is not parameterized). 622 // - Smi -> compile time subtype check (only if dst class is not parameterized).
542 // - Class equality (only if class is not parameterized). 623 // - Class equality (only if class is not parameterized).
543 // Inputs: 624 // Inputs:
544 // - A0: instance being type checked. 625 // - A0: instance being type checked.
545 // - A1: instantiator type arguments or raw_null. 626 // - A1: instantiator type arguments or raw_null.
(...skipping 14 matching lines...)
560 ASSERT(dst_type.IsFinalized()); 641 ASSERT(dst_type.IsFinalized());
561 // Assignable check is skipped in FlowGraphBuilder, not here. 642 // Assignable check is skipped in FlowGraphBuilder, not here.
562 ASSERT(dst_type.IsMalformed() || 643 ASSERT(dst_type.IsMalformed() ||
563 (!dst_type.IsDynamicType() && !dst_type.IsObjectType())); 644 (!dst_type.IsDynamicType() && !dst_type.IsObjectType()));
564 // Preserve instantiator and its type arguments. 645 // Preserve instantiator and its type arguments.
565 __ addiu(SP, SP, Immediate(-2 * kWordSize)); 646 __ addiu(SP, SP, Immediate(-2 * kWordSize));
566 __ sw(A2, Address(SP, 1 * kWordSize)); 647 __ sw(A2, Address(SP, 1 * kWordSize));
567 648
568 // A null object is always assignable and is returned as result. 649 // A null object is always assignable and is returned as result.
569 Label is_assignable, runtime_call; 650 Label is_assignable, runtime_call;
570 __ beq(A0, NULLREG, &is_assignable); 651
652 __ BranchEqual(A0, reinterpret_cast<int32_t>(Object::null()), &is_assignable);
571 __ delay_slot()->sw(A1, Address(SP, 0 * kWordSize)); 653 __ delay_slot()->sw(A1, Address(SP, 0 * kWordSize));
572 654
573 if (!FLAG_eliminate_type_checks) { 655 if (!FLAG_eliminate_type_checks) {
574 // If type checks are not eliminated during the graph building then 656 // If type checks are not eliminated during the graph building then
575 // a transition sentinel can be seen here. 657 // a transition sentinel can be seen here.
576 __ BranchEqual(A0, Object::transition_sentinel(), &is_assignable); 658 __ BranchEqual(A0, Object::transition_sentinel(), &is_assignable);
577 } 659 }
578 660
579 // Generate throw new TypeError() if the type is malformed. 661 // Generate throw new TypeError() if the type is malformed.
580 if (dst_type.IsMalformed()) { 662 if (dst_type.IsMalformed()) {
(...skipping 216 matching lines...)
797 // We do not use the final allocation index of the variable here, i.e. 879 // We do not use the final allocation index of the variable here, i.e.
798 // scope->VariableAt(i)->index(), because captured variables still need 880 // scope->VariableAt(i)->index(), because captured variables still need
799 // to be copied to the context that is not yet allocated. 881 // to be copied to the context that is not yet allocated.
800 const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos; 882 const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos;
801 __ sw(T3, Address(FP, computed_param_pos * kWordSize)); 883 __ sw(T3, Address(FP, computed_param_pos * kWordSize));
802 } 884 }
803 delete[] opt_param; 885 delete[] opt_param;
804 delete[] opt_param_position; 886 delete[] opt_param_position;
805 // Check that T0 now points to the null terminator in the array descriptor. 887 // Check that T0 now points to the null terminator in the array descriptor.
806 __ lw(T3, Address(T0)); 888 __ lw(T3, Address(T0));
807 __ beq(T3, NULLREG, &all_arguments_processed); 889 __ BranchEqual(T3, reinterpret_cast<int32_t>(Object::null()),
890 &all_arguments_processed);
808 } else { 891 } else {
809 ASSERT(num_opt_pos_params > 0); 892 ASSERT(num_opt_pos_params > 0);
810 __ Comment("There are optional positional parameters"); 893 __ Comment("There are optional positional parameters");
811 __ lw(T2, 894 __ lw(T2,
812 FieldAddress(S4, ArgumentsDescriptor::positional_count_offset())); 895 FieldAddress(S4, ArgumentsDescriptor::positional_count_offset()));
813 __ SmiUntag(T2); 896 __ SmiUntag(T2);
814 for (int i = 0; i < num_opt_pos_params; i++) { 897 for (int i = 0; i < num_opt_pos_params; i++) {
815 Label next_parameter; 898 Label next_parameter;
816 // Handle this optional positional parameter only if k or fewer positional 899 // Handle this optional positional parameter only if k or fewer positional
817 // arguments have been passed, where k is param_pos, the position of this 900 // arguments have been passed, where k is param_pos, the position of this
(...skipping 69 matching lines...)
887 __ sll(T2, T2, 1); // T2 is a Smi. 970 __ sll(T2, T2, 1); // T2 is a Smi.
888 971
889 __ Comment("Null arguments loop"); 972 __ Comment("Null arguments loop");
890 Label null_args_loop, null_args_loop_exit; 973 Label null_args_loop, null_args_loop_exit;
891 __ blez(T2, &null_args_loop_exit); 974 __ blez(T2, &null_args_loop_exit);
892 __ delay_slot()->addiu(T1, FP, 975 __ delay_slot()->addiu(T1, FP,
893 Immediate((kParamEndSlotFromFp + 1) * kWordSize)); 976 Immediate((kParamEndSlotFromFp + 1) * kWordSize));
894 __ Bind(&null_args_loop); 977 __ Bind(&null_args_loop);
895 __ addiu(T2, T2, Immediate(-kWordSize)); 978 __ addiu(T2, T2, Immediate(-kWordSize));
896 __ addu(T3, T1, T2); 979 __ addu(T3, T1, T2);
980 __ LoadImmediate(TMP, reinterpret_cast<int32_t>(Object::null()));
897 __ bgtz(T2, &null_args_loop); 981 __ bgtz(T2, &null_args_loop);
898 __ delay_slot()->sw(NULLREG, Address(T3)); 982 __ delay_slot()->sw(TMP, Address(T3));
899 __ Bind(&null_args_loop_exit); 983 __ Bind(&null_args_loop_exit);
900 } 984 }
901 985
902 986
903 void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) { 987 void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
904 // RA: return address. 988 // RA: return address.
905 // SP: receiver. 989 // SP: receiver.
906 // Sequence node has one return node, its input is load field node. 990 // Sequence node has one return node, its input is load field node.
907 __ lw(V0, Address(SP, 0 * kWordSize)); 991 __ lw(V0, Address(SP, 0 * kWordSize));
908 __ lw(V0, Address(V0, offset - kHeapObjectTag)); 992 __ lw(V0, Address(V0, offset - kHeapObjectTag));
909 __ Ret(); 993 __ Ret();
910 } 994 }
911 995
912 996
913 void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) { 997 void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
914 // RA: return address. 998 // RA: return address.
915 // SP+1: receiver. 999 // SP+1: receiver.
916 // SP+0: value. 1000 // SP+0: value.
917 // Sequence node has one store node and one return NULL node. 1001 // Sequence node has one store node and one return NULL node.
918 __ lw(T0, Address(SP, 1 * kWordSize)); // Receiver. 1002 __ lw(T0, Address(SP, 1 * kWordSize)); // Receiver.
919 __ lw(T1, Address(SP, 0 * kWordSize)); // Value. 1003 __ lw(T1, Address(SP, 0 * kWordSize)); // Value.
920 __ StoreIntoObject(T0, FieldAddress(T0, offset), T1); 1004 __ StoreIntoObject(T0, FieldAddress(T0, offset), T1);
1005 __ LoadImmediate(TMP, reinterpret_cast<int32_t>(Object::null()));
921 __ Ret(); 1006 __ Ret();
922 __ delay_slot()->mov(V0, NULLREG); 1007 __ delay_slot()->mov(V0, TMP);
923 } 1008 }
924 1009
925 1010
926 void FlowGraphCompiler::EmitFrameEntry() { 1011 void FlowGraphCompiler::EmitFrameEntry() {
927 const Function& function = parsed_function().function(); 1012 const Function& function = parsed_function().function();
928 if (CanOptimizeFunction() && function.is_optimizable()) { 1013 if (CanOptimizeFunction() && function.is_optimizable()) {
929 const bool can_optimize = !is_optimizing() || may_reoptimize(); 1014 const bool can_optimize = !is_optimizing() || may_reoptimize();
930 const Register function_reg = T0; 1015 const Register function_reg = T0;
931 if (can_optimize) { 1016 if (can_optimize) {
932 Label next; 1017 Label next;
(...skipping 173 matching lines...)
1106 } 1191 }
1107 1192
1108 // In unoptimized code, initialize (non-argument) stack allocated slots to 1193 // In unoptimized code, initialize (non-argument) stack allocated slots to
1109 // null. This does not cover the saved_args_desc_var slot. 1194 // null. This does not cover the saved_args_desc_var slot.
1110 if (!is_optimizing() && (num_locals > 0)) { 1195 if (!is_optimizing() && (num_locals > 0)) {
1111 __ TraceSimMsg("Initialize spill slots"); 1196 __ TraceSimMsg("Initialize spill slots");
1112 __ Comment("Initialize spill slots"); 1197 __ Comment("Initialize spill slots");
1113 const intptr_t slot_base = parsed_function().first_stack_local_index(); 1198 const intptr_t slot_base = parsed_function().first_stack_local_index();
1114 for (intptr_t i = 0; i < num_locals; ++i) { 1199 for (intptr_t i = 0; i < num_locals; ++i) {
1115 // Subtract index i (locals lie at lower addresses than FP). 1200 // Subtract index i (locals lie at lower addresses than FP).
1116 __ sw(NULLREG, Address(FP, (slot_base - i) * kWordSize)); 1201 __ LoadImmediate(TMP, reinterpret_cast<int32_t>(Object::null()));
1202 __ sw(TMP, Address(FP, (slot_base - i) * kWordSize));
1117 } 1203 }
1118 } 1204 }
1119 1205
1120 if (FLAG_print_scopes) { 1206 if (FLAG_print_scopes) {
1121 // Print the function scope (again) after generating the prologue in order 1207 // Print the function scope (again) after generating the prologue in order
1122 // to see annotations such as allocation indices of locals. 1208 // to see annotations such as allocation indices of locals.
1123 if (FLAG_print_ast) { 1209 if (FLAG_print_ast) {
1124 // Second printing. 1210 // Second printing.
1125 OS::Print("Annotated "); 1211 OS::Print("Annotated ");
1126 } 1212 }
(...skipping 214 matching lines...)
1341 void FlowGraphCompiler::EmitEqualityRegConstCompare(Register reg, 1427 void FlowGraphCompiler::EmitEqualityRegConstCompare(Register reg,
1342 const Object& obj, 1428 const Object& obj,
1343 bool needs_number_check, 1429 bool needs_number_check,
1344 intptr_t token_pos) { 1430 intptr_t token_pos) {
1345 __ TraceSimMsg("EqualityRegConstCompare"); 1431 __ TraceSimMsg("EqualityRegConstCompare");
1346 if (needs_number_check && 1432 if (needs_number_check &&
1347 (obj.IsMint() || obj.IsDouble() || obj.IsBigint())) { 1433 (obj.IsMint() || obj.IsDouble() || obj.IsBigint())) {
1348 __ addiu(SP, SP, Immediate(-2 * kWordSize)); 1434 __ addiu(SP, SP, Immediate(-2 * kWordSize));
1349 __ sw(reg, Address(SP, 1 * kWordSize)); 1435 __ sw(reg, Address(SP, 1 * kWordSize));
1350 __ LoadObject(TMP1, obj); 1436 __ LoadObject(TMP1, obj);
1437 __ sw(TMP1, Address(SP, 0 * kWordSize));
1351 __ BranchLink(&StubCode::IdenticalWithNumberCheckLabel()); 1438 __ BranchLink(&StubCode::IdenticalWithNumberCheckLabel());
1352 AddCurrentDescriptor(PcDescriptors::kRuntimeCall, 1439 AddCurrentDescriptor(PcDescriptors::kRuntimeCall,
1353 Isolate::kNoDeoptId, 1440 Isolate::kNoDeoptId,
1354 token_pos); 1441 token_pos);
1355 __ delay_slot()->sw(TMP1, Address(SP, 0 * kWordSize));
1356 __ TraceSimMsg("EqualityRegConstCompare return"); 1442 __ TraceSimMsg("EqualityRegConstCompare return");
1357 __ lw(reg, Address(SP, 1 * kWordSize)); // Restore 'reg'. 1443 __ lw(reg, Address(SP, 1 * kWordSize)); // Restore 'reg'.
1358 __ addiu(SP, SP, Immediate(2 * kWordSize)); // Discard constant. 1444 __ addiu(SP, SP, Immediate(2 * kWordSize)); // Discard constant.
1359 return; 1445 return;
1360 } 1446 }
1361 __ CompareObject(CMPRES, TMP1, reg, obj); 1447 __ CompareObject(CMPRES, TMP1, reg, obj);
1362 } 1448 }
1363 1449
1364 1450
1365 void FlowGraphCompiler::EmitEqualityRegRegCompare(Register left, 1451 void FlowGraphCompiler::EmitEqualityRegRegCompare(Register left,
1366 Register right, 1452 Register right,
1367 bool needs_number_check, 1453 bool needs_number_check,
1368 intptr_t token_pos) { 1454 intptr_t token_pos) {
1369 __ TraceSimMsg("EqualityRegRegCompare"); 1455 __ TraceSimMsg("EqualityRegRegCompare");
1456 __ Comment("EqualityRegRegCompare");
1370 if (needs_number_check) { 1457 if (needs_number_check) {
1371 __ addiu(SP, SP, Immediate(-2 * kWordSize)); 1458 __ addiu(SP, SP, Immediate(-2 * kWordSize));
1372 __ sw(left, Address(SP, 1 * kWordSize)); 1459 __ sw(left, Address(SP, 1 * kWordSize));
1460 __ sw(right, Address(SP, 0 * kWordSize));
1373 __ BranchLink(&StubCode::IdenticalWithNumberCheckLabel()); 1461 __ BranchLink(&StubCode::IdenticalWithNumberCheckLabel());
1374 AddCurrentDescriptor(PcDescriptors::kRuntimeCall, 1462 AddCurrentDescriptor(PcDescriptors::kRuntimeCall,
1375 Isolate::kNoDeoptId, 1463 Isolate::kNoDeoptId,
1376 token_pos); 1464 token_pos);
1377 __ delay_slot()->sw(right, Address(SP, 0 * kWordSize));
1378 __ TraceSimMsg("EqualityRegRegCompare return"); 1465 __ TraceSimMsg("EqualityRegRegCompare return");
1379 // Stub returns result in CMPRES. If it is 0, then left and right are equal. 1466 // Stub returns result in CMPRES. If it is 0, then left and right are equal.
1380 __ lw(right, Address(SP, 0 * kWordSize)); 1467 __ lw(right, Address(SP, 0 * kWordSize));
1381 __ lw(left, Address(SP, 1 * kWordSize)); 1468 __ lw(left, Address(SP, 1 * kWordSize));
1382 __ addiu(SP, SP, Immediate(2 * kWordSize)); 1469 __ addiu(SP, SP, Immediate(2 * kWordSize));
1383 } else { 1470 } else {
1384 __ slt(CMPRES, left, right); 1471 __ slt(CMPRES, left, right);
1385 __ slt(TMP1, right, left); 1472 __ slt(TMP1, right, left);
1386 } 1473 }
1387 } 1474 }
1388 1475
1389 1476
1390 // Implement equality spec: if any of the arguments is null do identity check. 1477 // Implement equality spec: if any of the arguments is null do identity check.
1391 // Fallthrough calls super equality. 1478 // Fallthrough calls super equality.
1392 void FlowGraphCompiler::EmitSuperEqualityCallPrologue(Register result, 1479 void FlowGraphCompiler::EmitSuperEqualityCallPrologue(Register result,
1393 Label* skip_call) { 1480 Label* skip_call) {
1394 Label check_identity, is_false, fall_through; 1481 Label check_identity, is_false, fall_through;
1395 __ TraceSimMsg("SuperEqualityCallPrologue"); 1482 __ TraceSimMsg("SuperEqualityCallPrologue");
1396 __ lw(result, Address(SP, 0 * kWordSize)); // Load right operand. 1483 __ lw(result, Address(SP, 0 * kWordSize)); // Load right operand.
1397 __ lw(TMP1, Address(SP, 1 * kWordSize)); // Load left operand. 1484 __ lw(TMP1, Address(SP, 1 * kWordSize)); // Load left operand.
1398 __ beq(result, NULLREG, &check_identity); // Is right null? 1485 __ LoadImmediate(CMPRES, reinterpret_cast<int32_t>(Object::null()));
1399 __ bne(TMP1, NULLREG, &fall_through); // If right is non-null, check left. 1486 __ beq(result, CMPRES, &check_identity); // Is right null?
1487 __ bne(TMP1, CMPRES, &fall_through); // If right is non-null, check left.
1400 1488
1401 __ Bind(&check_identity); 1489 __ Bind(&check_identity);
1402 __ bne(result, TMP1, &is_false); 1490 __ bne(result, TMP1, &is_false);
1403 __ LoadObject(result, Bool::True()); 1491 __ LoadObject(result, Bool::True());
1404 __ Drop(2); 1492 __ Drop(2);
1405 __ b(skip_call); 1493 __ b(skip_call);
1406 __ Bind(&is_false); 1494 __ Bind(&is_false);
1407 __ LoadObject(result, Bool::False()); 1495 __ LoadObject(result, Bool::False());
1408 __ Drop(2); 1496 __ Drop(2);
1409 __ b(skip_call); 1497 __ b(skip_call);
1410 __ Bind(&fall_through); 1498 __ Bind(&fall_through);
1411 } 1499 }
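The prologue implements the equality spec's null short-circuit. In plain C++ terms (pointer identity stands in for the VM identity check; this is a sketch of the emitted control flow, not VM code):

    // If either operand is null, '==' degenerates to an identity check and the
    // super equality call is skipped; otherwise control falls through to it.
    bool EqualityPrologue(const void* left, const void* right, bool* skip_call) {
      if (right == nullptr || left == nullptr) {  // check_identity path
        *skip_call = true;
        return left == right;
      }
      *skip_call = false;  // fall_through: perform the super equality call
      return false;        // return value unused on this path
    }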
1412 1500
1413 1501
1414 void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) { 1502 void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
1415 __ TraceSimMsg("SaveLiveRegisters"); 1503 __ TraceSimMsg("SaveLiveRegisters");
1416 // TODO(vegorov): consider saving only caller save (volatile) registers. 1504 // TODO(vegorov): consider saving only caller save (volatile) registers.
1417 const intptr_t fpu_registers = locs->live_registers()->fpu_registers(); 1505 const intptr_t fpu_regs_count = locs->live_registers()->fpu_regs_count();
1418 if (fpu_registers > 0) { 1506 if (fpu_regs_count > 0) {
1419 UNIMPLEMENTED(); 1507 __ AddImmediate(SP, -(fpu_regs_count * kFpuRegisterSize));
1508 // Store fpu registers with the lowest register number at the lowest
1509 // address.
1510 intptr_t offset = 0;
1511 for (intptr_t reg_idx = 0; reg_idx < kNumberOfFpuRegisters; ++reg_idx) {
1512 DRegister fpu_reg = static_cast<DRegister>(reg_idx);
1513 if (locs->live_registers()->ContainsFpuRegister(fpu_reg)) {
1514 __ StoreDToOffset(fpu_reg, SP, offset);
1515 offset += kFpuRegisterSize;
1516 }
1517 }
1518 ASSERT(offset == (fpu_regs_count * kFpuRegisterSize));
1420 } 1519 }
1421 1520
1422 // Store general purpose registers with the lowest register number at the 1521 // Store general purpose registers with the lowest register number at the
1423 // lowest address. 1522 // lowest address.
1424 const intptr_t cpu_registers = locs->live_registers()->cpu_registers(); 1523 const intptr_t cpu_registers = locs->live_registers()->cpu_registers();
1425 ASSERT((cpu_registers & ~kAllCpuRegistersList) == 0); 1524 ASSERT((cpu_registers & ~kAllCpuRegistersList) == 0);
1426 const int register_count = Utils::CountOneBits(cpu_registers); 1525 const int register_count = Utils::CountOneBits(cpu_registers);
1427 int registers_pushed = 0; 1526 int registers_pushed = 0;
1428 1527
1429 __ addiu(SP, SP, Immediate(-register_count * kWordSize)); 1528 __ addiu(SP, SP, Immediate(-register_count * kWordSize));
(...skipping 18 matching lines...)
1448 1547
1449 for (int i = 0; i < kNumberOfCpuRegisters; i++) { 1548 for (int i = 0; i < kNumberOfCpuRegisters; i++) {
1450 Register r = static_cast<Register>(i); 1549 Register r = static_cast<Register>(i);
1451 if (locs->live_registers()->ContainsRegister(r)) { 1550 if (locs->live_registers()->ContainsRegister(r)) {
1452 __ lw(r, Address(SP, registers_popped * kWordSize)); 1551 __ lw(r, Address(SP, registers_popped * kWordSize));
1453 registers_popped++; 1552 registers_popped++;
1454 } 1553 }
1455 } 1554 }
1456 __ addiu(SP, SP, Immediate(register_count * kWordSize)); 1555 __ addiu(SP, SP, Immediate(register_count * kWordSize));
1457 1556
1458 const intptr_t fpu_registers = locs->live_registers()->fpu_registers(); 1557 const intptr_t fpu_regs_count = locs->live_registers()->fpu_regs_count();
1459 if (fpu_registers > 0) { 1558 if (fpu_regs_count > 0) {
1460 UNIMPLEMENTED(); 1559 // Fpu registers have the lowest register number at the lowest address.
1560 intptr_t offset = 0;
1561 for (intptr_t reg_idx = 0; reg_idx < kNumberOfFpuRegisters; ++reg_idx) {
1562 DRegister fpu_reg = static_cast<DRegister>(reg_idx);
1563 if (locs->live_registers()->ContainsFpuRegister(fpu_reg)) {
1564 __ LoadDFromOffset(fpu_reg, SP, offset);
1565 offset += kFpuRegisterSize;
1566 }
1567 }
1568 ASSERT(offset == (fpu_regs_count * kFpuRegisterSize));
1569 __ AddImmediate(SP, offset);
1461 } 1570 }
1462 } 1571 }
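Both loops walk the FPU register mask in ascending register order, so a register's slot offset equals the space taken by lower-numbered live registers. A self-contained C++ sketch of that layout rule (the 8-byte register size is an assumption here):

    #include <cstdint>

    constexpr int kFpuRegSizeBytes = 8;  // assumed size of one saved register

    // Byte offset (from the adjusted SP) of register 'reg' in the save area,
    // or -1 if the register is not live. Lower-numbered live registers get
    // lower addresses, matching the store/load order above.
    int SavedFpuRegisterOffset(uint32_t live_mask, int reg) {
      if ((live_mask & (1u << reg)) == 0) return -1;
      int offset = 0;
      for (int r = 0; r < reg; ++r) {
        if (live_mask & (1u << r)) offset += kFpuRegSizeBytes;
      }
      return offset;
    }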
1463 1572
1464 1573
1465 void FlowGraphCompiler::EmitTestAndCall(const ICData& ic_data, 1574 void FlowGraphCompiler::EmitTestAndCall(const ICData& ic_data,
1466 Register class_id_reg, 1575 Register class_id_reg,
1467 intptr_t argument_count, 1576 intptr_t argument_count,
1468 const Array& argument_names, 1577 const Array& argument_names,
1469 Label* deopt, 1578 Label* deopt,
1470 intptr_t deopt_id, 1579 intptr_t deopt_id,
1471 intptr_t token_index, 1580 intptr_t token_index,
1472 LocationSummary* locs) { 1581 LocationSummary* locs) {
1473 ASSERT(!ic_data.IsNull() && (ic_data.NumberOfChecks() > 0)); 1582 ASSERT(!ic_data.IsNull() && (ic_data.NumberOfChecks() > 0));
1474 Label match_found; 1583 Label match_found;
1475 const intptr_t len = ic_data.NumberOfChecks(); 1584 const intptr_t len = ic_data.NumberOfChecks();
1476 GrowableArray<CidTarget> sorted(len); 1585 GrowableArray<CidTarget> sorted(len);
1477 SortICDataByCount(ic_data, &sorted); 1586 SortICDataByCount(ic_data, &sorted);
1478 ASSERT(class_id_reg != S4); 1587 ASSERT(class_id_reg != S4);
1479 ASSERT(len > 0); // Why bother otherwise. 1588 ASSERT(len > 0); // Why bother otherwise.
1480 const Array& arguments_descriptor = 1589 const Array& arguments_descriptor =
1481 Array::ZoneHandle(ArgumentsDescriptor::New(argument_count, 1590 Array::ZoneHandle(ArgumentsDescriptor::New(argument_count,
1482 argument_names)); 1591 argument_names));
1483 __ TraceSimMsg("EmitTestAndCall"); 1592 __ TraceSimMsg("EmitTestAndCall");
1593 __ Comment("EmitTestAndCall");
1484 __ LoadObject(S4, arguments_descriptor); 1594 __ LoadObject(S4, arguments_descriptor);
1485 for (intptr_t i = 0; i < len; i++) { 1595 for (intptr_t i = 0; i < len; i++) {
1486 const bool is_last_check = (i == (len - 1)); 1596 const bool is_last_check = (i == (len - 1));
1487 Label next_test; 1597 Label next_test;
1488 if (is_last_check) { 1598 if (is_last_check) {
1489 __ BranchNotEqual(class_id_reg, sorted[i].cid, deopt); 1599 __ BranchNotEqual(class_id_reg, sorted[i].cid, deopt);
1490 } else { 1600 } else {
1491 __ BranchNotEqual(class_id_reg, sorted[i].cid, &next_test); 1601 __ BranchNotEqual(class_id_reg, sorted[i].cid, &next_test);
1492 } 1602 }
1493 // Do not use the code from the function, but let the code be patched so 1603 // Do not use the code from the function, but let the code be patched so
(...skipping 12 matching lines...)
1506 __ Bind(&next_test); 1616 __ Bind(&next_test);
1507 } 1617 }
1508 __ Bind(&match_found); 1618 __ Bind(&match_found);
1509 } 1619 }
1510 1620
1511 1621
1512 void FlowGraphCompiler::EmitDoubleCompareBranch(Condition true_condition, 1622 void FlowGraphCompiler::EmitDoubleCompareBranch(Condition true_condition,
1513 FpuRegister left, 1623 FpuRegister left,
1514 FpuRegister right, 1624 FpuRegister right,
1515 BranchInstr* branch) { 1625 BranchInstr* branch) {
1516 UNIMPLEMENTED(); 1626 ASSERT(branch != NULL);
1627 __ Comment("DoubleCompareBranch");
1628 assembler()->cund(left, right);
1629 BlockEntryInstr* nan_result = (true_condition == NE) ?
1630 branch->true_successor() : branch->false_successor();
1631 assembler()->bc1t(GetJumpLabel(nan_result));
1632
1633 switch (true_condition) {
1634 case EQ: assembler()->ceqd(left, right); break;
1635 case LT: assembler()->coltd(left, right); break;
1636 case LE: assembler()->coled(left, right); break;
1637 case GT: assembler()->coltd(right, left); break;
1638 case GE: assembler()->coled(right, left); break;
1639 default: {
1640 // Only the above conditions should be passed to this function.
1641 UNREACHABLE();
1642 break;
1643 }
1644 }
1645
1646 assembler()->LoadImmediate(TMP, 1);
1647 assembler()->movf(CMPRES, TMP);
1648 assembler()->movt(CMPRES, ZR);
1649 assembler()->mov(TMP, ZR);
1650
1651 // EmitBranchOnCondition expects ordering to be described by CMPRES, TMP1.
1652 branch->EmitBranchOnCondition(this, EQ);
1517 } 1653 }
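The cund/bc1t pair routes unordered (NaN) operands to the false successor for every condition except NE, which matches IEEE-754 comparison semantics; a runnable C++ check of that rule:

    #include <cassert>
    #include <limits>

    int main() {
      const double nan = std::numeric_limits<double>::quiet_NaN();
      // Every ordered comparison involving NaN is false...
      assert(!(nan == 1.0) && !(nan < 1.0) && !(nan <= 1.0));
      assert(!(nan > 1.0) && !(nan >= 1.0));
      // ...while != is true, so NE alone sends NaN to true_successor().
      assert(nan != 1.0);
      return 0;
    }

The movf/movt pair then materializes the FPU condition flag so that CMPRES == 0 exactly when the condition held, and TMP is cleared to zero, giving EmitBranchOnCondition(EQ) the CMPRES/TMP1 "equal" encoding it expects.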
1518 1654
1519 1655
1520 void FlowGraphCompiler::EmitDoubleCompareBool(Condition true_condition, 1656 void FlowGraphCompiler::EmitDoubleCompareBool(Condition true_condition,
1521 FpuRegister left, 1657 FpuRegister left,
1522 FpuRegister right, 1658 FpuRegister right,
1523 Register result) { 1659 Register result) {
1524 UNIMPLEMENTED(); 1660 Label done;
1661 __ Comment("DoubleCompareBool");
1662 assembler()->LoadObject(result, Bool::False());
1663 assembler()->cund(left, right);
1664 assembler()->bc1t(&done);
1665
1666 switch (true_condition) {
1667 case EQ: assembler()->ceqd(left, right); break;
1668 case LT: assembler()->coltd(left, right); break;
1669 case LE: assembler()->coled(left, right); break;
1670 case GT: assembler()->coltd(right, left); break;
1671 case GE: assembler()->coled(right, left); break;
1672 default: {
1673 // Only the above conditions should be passed to this function.
1674 UNREACHABLE();
1675 break;
1676 }
1677 }
1678
1679 assembler()->bc1f(&done); // False is already in result.
1680 assembler()->LoadObject(result, Bool::True());
1681 assembler()->Bind(&done);
1525 } 1682 }
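Reduced to C++ for the EQ case, the Bool variant computes the following (a sketch; std::isnan plays the role of the cund check):

    #include <cmath>

    // NaN operands and failed comparisons both yield false; only an ordered,
    // satisfied condition yields true.
    bool DoubleCompareBoolEQ(double left, double right) {
      if (std::isnan(left) || std::isnan(right)) return false;  // cund; bc1t done
      return left == right;  // ceqd; bc1f keeps False, else load Bool::True()
    }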
1526 1683
1527 1684
1528 FieldAddress FlowGraphCompiler::ElementAddressForIntIndex(intptr_t cid, 1685 FieldAddress FlowGraphCompiler::ElementAddressForIntIndex(intptr_t cid,
1529 intptr_t index_scale, 1686 intptr_t index_scale,
1530 Register array, 1687 Register array,
1531 intptr_t index) { 1688 intptr_t index) {
1532 UNREACHABLE(); 1689 UNREACHABLE();
1533 return FieldAddress(array, index); 1690 return FieldAddress(array, index);
1534 } 1691 }
(...skipping 49 matching lines...)
1584 } else { 1741 } else {
1585 ASSERT(destination.IsStackSlot()); 1742 ASSERT(destination.IsStackSlot());
1586 MoveMemoryToMemory(destination.ToStackSlotAddress(), 1743 MoveMemoryToMemory(destination.ToStackSlotAddress(),
1587 source.ToStackSlotAddress()); 1744 source.ToStackSlotAddress());
1588 } 1745 }
1589 } else if (source.IsFpuRegister()) { 1746 } else if (source.IsFpuRegister()) {
1590 if (destination.IsFpuRegister()) { 1747 if (destination.IsFpuRegister()) {
1591 __ movd(destination.fpu_reg(), source.fpu_reg()); 1748 __ movd(destination.fpu_reg(), source.fpu_reg());
1592 } else { 1749 } else {
1593 if (destination.IsDoubleStackSlot()) { 1750 if (destination.IsDoubleStackSlot()) {
1594 __ sdc1(source.fpu_reg(), destination.ToStackSlotAddress()); 1751 const Address& addr = destination.ToStackSlotAddress();
1752 int32_t offset = addr.offset();
1753 __ StoreDToOffset(source.fpu_reg(), FP, offset);
1595 } else { 1754 } else {
1596 ASSERT(destination.IsQuadStackSlot()); 1755 ASSERT(destination.IsQuadStackSlot());
1597 UNIMPLEMENTED(); 1756 UNIMPLEMENTED();
1598 } 1757 }
1599 } 1758 }
1600 } else if (source.IsDoubleStackSlot()) { 1759 } else if (source.IsDoubleStackSlot()) {
1601 if (destination.IsFpuRegister()) { 1760 if (destination.IsFpuRegister()) {
1602 __ ldc1(destination.fpu_reg(), source.ToStackSlotAddress()); 1761 const Address& addr = source.ToStackSlotAddress();
1762 const Register base = addr.base();
1763 const int32_t offset = addr.offset();
1764 __ LoadDFromOffset(destination.fpu_reg(), base, offset);
1603 } else { 1765 } else {
1604 ASSERT(destination.IsDoubleStackSlot()); 1766 ASSERT(destination.IsDoubleStackSlot());
1605 __ ldc1(FpuTMP, source.ToStackSlotAddress()); 1767 const Address& saddr = source.ToStackSlotAddress();
1606 __ sdc1(FpuTMP, destination.ToStackSlotAddress()); 1768 const Address& daddr = destination.ToStackSlotAddress();
1769 int32_t soffset = saddr.offset();
1770 int32_t doffset = daddr.offset();
1771 __ LoadDFromOffset(FpuTMP, FP, soffset);
1772 __ StoreDToOffset(FpuTMP, FP, doffset);
1607 } 1773 }
1608 } else if (source.IsQuadStackSlot()) { 1774 } else if (source.IsQuadStackSlot()) {
1609 UNIMPLEMENTED(); 1775 UNIMPLEMENTED();
1610 } else { 1776 } else {
1611 ASSERT(source.IsConstant()); 1777 ASSERT(source.IsConstant());
1612 if (destination.IsRegister()) { 1778 if (destination.IsRegister()) {
1613 const Object& constant = source.constant(); 1779 const Object& constant = source.constant();
1614 __ LoadObject(destination.reg(), constant); 1780 __ LoadObject(destination.reg(), constant);
1615 } else { 1781 } else {
1616 ASSERT(destination.IsStackSlot()); 1782 ASSERT(destination.IsStackSlot());
(...skipping 34 matching lines...)
1651 source.IsQuadStackSlot()); 1817 source.IsQuadStackSlot());
1652 bool double_width = destination.IsDoubleStackSlot() || 1818 bool double_width = destination.IsDoubleStackSlot() ||
1653 source.IsDoubleStackSlot(); 1819 source.IsDoubleStackSlot();
1654 DRegister reg = source.IsFpuRegister() ? source.fpu_reg() 1820 DRegister reg = source.IsFpuRegister() ? source.fpu_reg()
1655 : destination.fpu_reg(); 1821 : destination.fpu_reg();
1656 const Address& slot_address = source.IsFpuRegister() 1822 const Address& slot_address = source.IsFpuRegister()
1657 ? destination.ToStackSlotAddress() 1823 ? destination.ToStackSlotAddress()
1658 : source.ToStackSlotAddress(); 1824 : source.ToStackSlotAddress();
1659 1825
1660 if (double_width) { 1826 if (double_width) {
1661 __ ldc1(FpuTMP, slot_address); 1827 const Register base = slot_address.base();
1662 __ sdc1(reg, slot_address); 1828 const int32_t offset = slot_address.offset();
1829 __ LoadDFromOffset(FpuTMP, base, offset);
1830 __ StoreDToOffset(reg, base, offset);
1663 __ movd(reg, FpuTMP); 1831 __ movd(reg, FpuTMP);
1664 } else { 1832 } else {
1665 UNIMPLEMENTED(); 1833 UNIMPLEMENTED();
1666 } 1834 }
1667 } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) { 1835 } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
1668 const Address& source_slot_address = source.ToStackSlotAddress(); 1836 const Address& source_slot_address = source.ToStackSlotAddress();
1669 const Address& destination_slot_address = destination.ToStackSlotAddress(); 1837 const Address& destination_slot_address = destination.ToStackSlotAddress();
1838 const Register sbase = source_slot_address.base();
1839 const int32_t soffset = source_slot_address.offset();
1840 const Register dbase = destination_slot_address.base();
1841 const int32_t doffset = destination_slot_address.offset();
1670 1842
1671 ScratchFpuRegisterScope ensure_scratch(this, FpuTMP); 1843 ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
1672 __ ldc1(FpuTMP, source_slot_address); 1844 __ LoadDFromOffset(FpuTMP, sbase, soffset);
1673 __ ldc1(ensure_scratch.reg(), destination_slot_address); 1845 __ LoadDFromOffset(ensure_scratch.reg(), dbase, doffset);
1674 __ sdc1(FpuTMP, destination_slot_address); 1846 __ StoreDToOffset(FpuTMP, dbase, doffset);
1675 __ sdc1(ensure_scratch.reg(), source_slot_address); 1847 __ StoreDToOffset(ensure_scratch.reg(), sbase, soffset);
1676 } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) { 1848 } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
1677 UNIMPLEMENTED(); 1849 UNIMPLEMENTED();
1678 } else { 1850 } else {
1679 UNREACHABLE(); 1851 UNREACHABLE();
1680 } 1852 }
1681 1853
1682 // The swap of source and destination has executed a move from source to 1854 // The swap of source and destination has executed a move from source to
1683 // destination. 1855 // destination.
1684 move->Eliminate(); 1856 move->Eliminate();
1685 1857
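The double-stack-slot case above is the classic swap through two temporaries, with FpuTMP and the scratch FPU register in the temporary roles; in C++ terms:

    // Swap two doubles via two temporaries, mirroring the Load/Store pairs above.
    void SwapDoubleSlots(double* source, double* destination) {
      const double tmp = *source;           // LoadDFromOffset(FpuTMP, sbase, soffset)
      const double scratch = *destination;  // LoadDFromOffset(scratch reg, dbase, doffset)
      *destination = tmp;                   // StoreDToOffset(FpuTMP, dbase, doffset)
      *source = scratch;                    // StoreDToOffset(scratch reg, sbase, soffset)
    }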
(...skipping 53 matching lines...)
1739 1911
1740 void ParallelMoveResolver::RestoreScratch(Register reg) { 1912 void ParallelMoveResolver::RestoreScratch(Register reg) {
1741 __ TraceSimMsg("ParallelMoveResolver::RestoreScratch"); 1913 __ TraceSimMsg("ParallelMoveResolver::RestoreScratch");
1742 __ Pop(reg); 1914 __ Pop(reg);
1743 } 1915 }
1744 1916
1745 1917
1746 void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) { 1918 void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) {
1747 __ TraceSimMsg("ParallelMoveResolver::SpillFpuScratch"); 1919 __ TraceSimMsg("ParallelMoveResolver::SpillFpuScratch");
1748 __ AddImmediate(SP, -kDoubleSize); 1920 __ AddImmediate(SP, -kDoubleSize);
1749 __ sdc1(reg, Address(SP)); 1921 __ StoreDToOffset(reg, SP, 0);
1750 } 1922 }
1751 1923
1752 1924
1753 void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) { 1925 void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) {
1754 __ TraceSimMsg("ParallelMoveResolver::RestoreFpuScratch"); 1926 __ TraceSimMsg("ParallelMoveResolver::RestoreFpuScratch");
1755 __ ldc1(reg, Address(SP)); 1927 __ LoadDFromOffset(reg, SP, 0);
1756 __ AddImmediate(SP, kDoubleSize); 1928 __ AddImmediate(SP, kDoubleSize);
1757 } 1929 }
1758 1930
1759 1931
1760 #undef __ 1932 #undef __
1761 1933
1762 1934
1763 } // namespace dart 1935 } // namespace dart
1764 1936
1765 #endif // defined TARGET_ARCH_MIPS 1937 #endif // defined TARGET_ARCH_MIPS
