Chromium Code Reviews

Unified Diff: runtime/vm/flow_graph_compiler_mips.cc

Issue 1074533002: Replace MIPS TraceSimMsg calls with Comment calls. (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 5 years, 8 months ago
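
Description: The change is mechanical throughout the file. Each MIPS-only TraceSimMsg call, whose message was visible only when tracing code run under the MIPS simulator, is replaced by (or folded into an already-present) Comment call, which records the message in the assembler's comment stream so it also appears when disassembling the generated code. The standalone C++ sketch below illustrates the comment-stream mechanism being switched to; the Assembler class here is invented for illustration and is not the Dart VM assembler.

// illustrative_comment_stream.cc - a minimal sketch, not Dart VM code.
#include <cstdint>
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

class Assembler {
 public:
  // Record an annotation at the current code offset; a disassembler can
  // later interleave these annotations with the decoded instructions.
  void Comment(const std::string& msg) { comments_.emplace_back(offset_, msg); }

  // Append one 4-byte instruction encoding to the code buffer.
  void Emit(uint32_t encoding) {
    buffer_.push_back(encoding);
    offset_ += sizeof(uint32_t);
  }

  // Print the buffer, merging the recorded comments back in by offset.
  void Disassemble() const {
    size_t next = 0;
    size_t offset = 0;
    for (uint32_t insn : buffer_) {
      while (next < comments_.size() && comments_[next].first == offset) {
        std::printf(";; %s\n", comments_[next++].second.c_str());
      }
      std::printf("0x%04zx: 0x%08x\n", offset, insn);
      offset += sizeof(uint32_t);
    }
  }

 private:
  std::vector<uint32_t> buffer_;
  std::vector<std::pair<size_t, std::string>> comments_;
  size_t offset_ = 0;
};

int main() {
  Assembler assembler;
  // Unlike a simulator-only trace message, the comment is kept with the
  // instruction stream and survives into disassembly on any host.
  assembler.Comment("BoolToJump");
  assembler.Emit(0x10400002u);  // placeholder branch encoding
  assembler.Disassemble();
  return 0;
}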
@@ -1,10 +1,10 @@
 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.
 
 #include "vm/globals.h"  // Needed here to get TARGET_ARCH_MIPS.
 #if defined(TARGET_ARCH_MIPS)
 
 #include "vm/flow_graph_compiler.h"
 
 #include "vm/ast_printer.h"
(...skipping 185 matching lines...)
@@ -196,39 +196,39 @@
 }
 
 
 #define __ assembler()->
 
 
 // Fall through if bool_register contains null.
 void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
                                            Label* is_true,
                                            Label* is_false) {
-  __ TraceSimMsg("BoolToJump");
+  __ Comment("BoolToJump");
   Label fall_through;
   __ BranchEqual(bool_register, Object::null_object(), &fall_through);
   __ BranchEqual(bool_register, Bool::True(), is_true);
   __ b(is_false);
   __ Bind(&fall_through);
 }
 
 
 // A0: instance (must be preserved).
 // A1: instantiator type arguments (if used).
 RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
     TypeTestStubKind test_kind,
     Register instance_reg,
     Register type_arguments_reg,
     Register temp_reg,
     Label* is_instance_lbl,
     Label* is_not_instance_lbl) {
-  __ TraceSimMsg("CallSubtypeTestStub");
+  __ Comment("CallSubtypeTestStub");
   ASSERT(instance_reg == A0);
   ASSERT(temp_reg == kNoRegister);  // Unused on MIPS.
   const SubtypeTestCache& type_test_cache =
       SubtypeTestCache::ZoneHandle(SubtypeTestCache::New());
   StubCode* stub_code = isolate()->stub_code();
   __ LoadObject(A2, type_test_cache);
   if (test_kind == kTestTypeOneArg) {
     ASSERT(type_arguments_reg == kNoRegister);
     __ LoadImmediate(A1, reinterpret_cast<int32_t>(Object::null()));
     __ BranchLink(&stub_code->Subtype1TestCacheLabel());
(...skipping 88 matching lines...)
@@ -323,39 +323,38 @@
                                      kTempReg,
                                      is_instance_lbl,
                                      is_not_instance_lbl);
 }
 
 
 void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
                                       const GrowableArray<intptr_t>& class_ids,
                                       Label* is_equal_lbl,
                                       Label* is_not_equal_lbl) {
-  __ TraceSimMsg("CheckClassIds");
+  __ Comment("CheckClassIds");
   for (intptr_t i = 0; i < class_ids.length(); i++) {
     __ BranchEqual(class_id_reg, Immediate(class_ids[i]), is_equal_lbl);
   }
   __ b(is_not_equal_lbl);
 }
 
 
 // Testing against an instantiated type with no arguments, without
 // SubtypeTestCache.
 // A0: instance being type checked (preserved).
 // Clobbers: T0, T1, T2
 // Returns true if there is a fallthrough.
 bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
     intptr_t token_pos,
     const AbstractType& type,
     Label* is_instance_lbl,
     Label* is_not_instance_lbl) {
-  __ TraceSimMsg("InstantiatedTypeNoArgumentsTest");
   __ Comment("InstantiatedTypeNoArgumentsTest");
   ASSERT(type.IsInstantiated());
   const Class& type_class = Class::Handle(type.type_class());
   ASSERT(type_class.NumTypeArguments() == 0);
 
   const Register kInstanceReg = A0;
   __ andi(T0, A0, Immediate(kSmiTagMask));
   // If instance is Smi, check directly.
   const Class& smi_class = Class::Handle(Smi::Class());
   if (smi_class.IsSubtypeOf(TypeArguments::Handle(),
(...skipping 44 matching lines...)
@@ -406,49 +405,47 @@
 // Clobbers A1, A2, T0-T3.
 // Immediate class test already done.
 // TODO(srdjan): Implement a quicker subtype check, as type test
 // arrays can grow too high, but they may be useful when optimizing
 // code (type-feedback).
 RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
     intptr_t token_pos,
     const Class& type_class,
     Label* is_instance_lbl,
     Label* is_not_instance_lbl) {
-  __ TraceSimMsg("Subtype1TestCacheLookup");
   __ Comment("Subtype1TestCacheLookup");
   const Register kInstanceReg = A0;
   __ LoadClass(T0, kInstanceReg);
   // T0: instance class.
   // Check immediate superclass equality.
   __ lw(T0, FieldAddress(T0, Class::super_type_offset()));
   __ lw(T0, FieldAddress(T0, Type::type_class_offset()));
   __ BranchEqual(T0, type_class, is_instance_lbl);
 
   const Register kTypeArgumentsReg = kNoRegister;
   const Register kTempReg = kNoRegister;
   return GenerateCallSubtypeTestStub(kTestTypeOneArg,
                                      kInstanceReg,
                                      kTypeArgumentsReg,
                                      kTempReg,
                                      is_instance_lbl,
                                      is_not_instance_lbl);
 }
 
 
 // Generates inlined check if 'type' is a type parameter or type itself
 // A0: instance (preserved).
 RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
     intptr_t token_pos,
     const AbstractType& type,
     Label* is_instance_lbl,
     Label* is_not_instance_lbl) {
-  __ TraceSimMsg("UninstantiatedTypeTest");
   __ Comment("UninstantiatedTypeTest");
   ASSERT(!type.IsInstantiated());
   // Skip check if destination is a dynamic type.
   if (type.IsTypeParameter()) {
     const TypeParameter& type_param = TypeParameter::Cast(type);
     // Load instantiator (or null) and instantiator type arguments on stack.
     __ lw(A1, Address(SP, 0));  // Get instantiator type arguments.
     // A1: instantiator type arguments.
     // Check if type arguments are null, i.e. equivalent to vector of dynamic.
     __ LoadImmediate(T7, reinterpret_cast<int32_t>(Object::null()));
(...skipping 59 matching lines...)
@@ -514,21 +511,20 @@
 // - preserved instance in A0 and optional instantiator type arguments in A1.
 // Clobbers: T0, T1, T2
 // Note that this inlined code must be followed by the runtime_call code, as it
 // may fall through to it. Otherwise, this inline code will jump to the label
 // is_instance or to the label is_not_instance.
 RawSubtypeTestCache* FlowGraphCompiler::GenerateInlineInstanceof(
     intptr_t token_pos,
     const AbstractType& type,
     Label* is_instance_lbl,
     Label* is_not_instance_lbl) {
-  __ TraceSimMsg("InlineInstanceof");
   __ Comment("InlineInstanceof");
   if (type.IsVoidType()) {
     // A non-null value is returned from a void function, which will result in a
     // type error. A null value is handled prior to executing this inline code.
     return SubtypeTestCache::null();
   }
   if (type.IsInstantiated()) {
     const Class& type_class = Class::ZoneHandle(type.type_class());
     // A class equality check is only applicable with a dst type of a
     // non-parameterized class, non-signature class, or with a raw dst type of
(...skipping 123 matching lines...)
@@ -658,21 +654,21 @@
 // Returns:
 // - object in A0 for successful assignable check (or throws TypeError).
 // Clobbers: T0, T1, T2
 // Performance notes: positive checks must be quick, negative checks can be slow
 // as they throw an exception.
 void FlowGraphCompiler::GenerateAssertAssignable(intptr_t token_pos,
                                                  intptr_t deopt_id,
                                                  const AbstractType& dst_type,
                                                  const String& dst_name,
                                                  LocationSummary* locs) {
-  __ TraceSimMsg("AssertAssignable");
+  __ Comment("AssertAssignable");
   ASSERT(token_pos >= 0);
   ASSERT(!dst_type.IsNull());
   ASSERT(dst_type.IsFinalized());
   // Assignable check is skipped in FlowGraphBuilder, not here.
   ASSERT(dst_type.IsMalformedOrMalbounded() ||
          (!dst_type.IsDynamicType() && !dst_type.IsObjectType()));
   // Preserve instantiator and its type arguments.
   __ addiu(SP, SP, Immediate(-2 * kWordSize));
   __ sw(A2, Address(SP, 1 * kWordSize));
 
(...skipping 72 matching lines...)
@@ -751,21 +747,20 @@
   Definition* defn = instr->AsDefinition();
   if ((defn != NULL) && defn->HasTemp()) {
     __ Push(defn->locs()->out(0).reg());
   }
 }
 
 
 // Input parameters:
 // S4: arguments descriptor array.
 void FlowGraphCompiler::CopyParameters() {
-  __ TraceSimMsg("CopyParameters");
   __ Comment("Copy parameters");
   const Function& function = parsed_function().function();
   LocalScope* scope = parsed_function().node_sequence()->scope();
   const int num_fixed_params = function.num_fixed_parameters();
   const int num_opt_pos_params = function.NumOptionalPositionalParameters();
   const int num_opt_named_params = function.NumOptionalNamedParameters();
   const int num_params =
       num_fixed_params + num_opt_pos_params + num_opt_named_params;
   ASSERT(function.NumParameters() == num_params);
   ASSERT(parsed_function().first_parameter_index() == kFirstLocalSlotFromFp);
(...skipping 308 matching lines...)
@@ -1080,21 +1075,20 @@
   // unless we are in debug mode or unless we are compiling a closure.
   if (num_copied_params == 0) {
 #ifdef DEBUG
     ASSERT(!parsed_function().function().HasOptionalParameters());
     const bool check_arguments = !flow_graph().IsCompiledForOsr();
 #else
     const bool check_arguments =
         function.IsClosureFunction() && !flow_graph().IsCompiledForOsr();
 #endif
     if (check_arguments) {
-      __ TraceSimMsg("Check argument count");
       __ Comment("Check argument count");
       // Check that exactly num_fixed arguments are passed in.
       Label correct_num_arguments, wrong_num_arguments;
       __ lw(T0, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
       __ BranchNotEqual(T0, Immediate(Smi::RawValue(num_fixed_params)),
                         &wrong_num_arguments);
 
       __ lw(T1, FieldAddress(S4,
                              ArgumentsDescriptor::positional_count_offset()));
       __ beq(T0, T1, &correct_num_arguments);
(...skipping 16 matching lines...)
@@ -1117,21 +1111,20 @@
     LocalScope* scope = parsed_function().node_sequence()->scope();
     LocalVariable* closure_parameter = scope->VariableAt(0);
     __ lw(CTX, Address(FP, closure_parameter->index() * kWordSize));
     __ lw(CTX, FieldAddress(CTX, Closure::context_offset()));
   }
 
   // In unoptimized code, initialize (non-argument) stack allocated slots to
   // null.
   if (!is_optimizing()) {
     ASSERT(num_locals > 0);  // There is always at least context_var.
-    __ TraceSimMsg("Initialize spill slots");
     __ Comment("Initialize spill slots");
     const intptr_t slot_base = parsed_function().first_stack_local_index();
     const intptr_t context_index =
         parsed_function().current_context_var()->index();
     if (num_locals > 1) {
       __ LoadImmediate(V0, reinterpret_cast<int32_t>(Object::null()));
     }
     for (intptr_t i = 0; i < num_locals; ++i) {
       // Subtract index i (locals lie at lower addresses than FP).
       if (((slot_base - i) == context_index)) {
(...skipping 109 matching lines...)
@@ -1247,68 +1240,68 @@
     intptr_t deopt_id,
     intptr_t token_pos,
     LocationSummary* locs) {
   ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0);
   // Each ICData propagated from unoptimized to optimized code contains the
   // function that corresponds to the Dart function of that IC call. Due
   // to inlining in optimized code, that function may not correspond to the
   // top-level function (parsed_function().function()) which could be
   // reoptimized and which counter needs to be incremented.
   // Pass the function explicitly, it is used in IC stub.
-  __ TraceSimMsg("OptimizedInstanceCall");
+  __ Comment("OptimizedInstanceCall");
   __ LoadObject(T0, parsed_function().function());
   __ LoadObject(S5, ic_data);
   GenerateDartCall(deopt_id,
                    token_pos,
                    target_label,
                    RawPcDescriptors::kIcCall,
                    locs);
   __ Drop(argument_count);
 }
 
 
 void FlowGraphCompiler::EmitInstanceCall(ExternalLabel* target_label,
                                          const ICData& ic_data,
                                          intptr_t argument_count,
                                          intptr_t deopt_id,
                                          intptr_t token_pos,
                                          LocationSummary* locs) {
   ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0);
-  __ TraceSimMsg("InstanceCall");
+  __ Comment("InstanceCall");
   __ LoadObject(S5, ic_data);
   GenerateDartCall(deopt_id,
                    token_pos,
                    target_label,
                    RawPcDescriptors::kIcCall,
                    locs);
-  __ TraceSimMsg("InstanceCall return");
+  __ Comment("InstanceCall return");
   __ Drop(argument_count);
 #if defined(DEBUG)
   __ LoadImmediate(S4, kInvalidObjectPointer);
 #endif
 }
 
 
 void FlowGraphCompiler::EmitMegamorphicInstanceCall(
     const ICData& ic_data,
     intptr_t argument_count,
     intptr_t deopt_id,
     intptr_t token_pos,
     LocationSummary* locs) {
   MegamorphicCacheTable* table = Isolate::Current()->megamorphic_cache_table();
   const String& name = String::Handle(ic_data.target_name());
   const Array& arguments_descriptor =
       Array::ZoneHandle(ic_data.arguments_descriptor());
   ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
   const MegamorphicCache& cache =
       MegamorphicCache::ZoneHandle(table->Lookup(name, arguments_descriptor));
-  __ TraceSimMsg("MegamorphicInstanceCall");
+  __ Comment("MegamorphicInstanceCall");
   __ lw(T0, Address(SP, (argument_count - 1) * kWordSize));
   __ LoadTaggedClassIdMayBeSmi(T0, T0);
 
   // T0: class ID of the receiver (smi).
   __ LoadObject(T1, cache);
   __ lw(T2, FieldAddress(T1, MegamorphicCache::buckets_offset()));
   __ lw(T1, FieldAddress(T1, MegamorphicCache::mask_offset()));
   // T2: cache buckets array.
   // T1: mask.
   __ mov(T3, T0);
(...skipping 62 matching lines...)
@@ -1377,126 +1370,125 @@
 
 
 void FlowGraphCompiler::EmitOptimizedStaticCall(
     const Function& function,
     const Array& arguments_descriptor,
     intptr_t argument_count,
     intptr_t deopt_id,
     intptr_t token_pos,
     LocationSummary* locs) {
   StubCode* stub_code = isolate()->stub_code();
-  __ TraceSimMsg("StaticCall");
+  __ Comment("StaticCall");
   __ LoadObject(S4, arguments_descriptor);
   // Do not use the code from the function, but let the code be patched so that
   // we can record the outgoing edges to other code.
   GenerateDartCall(deopt_id,
                    token_pos,
                    &stub_code->CallStaticFunctionLabel(),
                    RawPcDescriptors::kOptStaticCall,
                    locs);
   AddStaticCallTarget(function);
   __ Drop(argument_count);
 }
 
 
 Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
     Register reg,
     const Object& obj,
     bool needs_number_check,
     intptr_t token_pos) {
-  __ TraceSimMsg("EqualityRegConstCompare");
+  __ Comment("EqualityRegConstCompare");
   ASSERT(!needs_number_check ||
          (!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint()));
   if (needs_number_check) {
     StubCode* stub_code = isolate()->stub_code();
     ASSERT(!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint());
     __ addiu(SP, SP, Immediate(-2 * kWordSize));
     __ sw(reg, Address(SP, 1 * kWordSize));
     __ LoadObject(TMP, obj);
     __ sw(TMP, Address(SP, 0 * kWordSize));
     if (is_optimizing()) {
       __ BranchLinkPatchable(
           &stub_code->OptimizedIdenticalWithNumberCheckLabel());
     } else {
       __ BranchLinkPatchable(
           &stub_code->UnoptimizedIdenticalWithNumberCheckLabel());
     }
     if (token_pos != Scanner::kNoSourcePos) {
       AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall,
                            Isolate::kNoDeoptId,
                            token_pos);
     }
-    __ TraceSimMsg("EqualityRegConstCompare return");
+    __ Comment("EqualityRegConstCompare return");
     // Stub returns result in CMPRES1 (if it is 0, then reg and obj are equal).
     __ lw(reg, Address(SP, 1 * kWordSize));  // Restore 'reg'.
     __ addiu(SP, SP, Immediate(2 * kWordSize));  // Discard constant.
     return Condition(CMPRES1, ZR, EQ);
   } else {
     int16_t imm = 0;
     const Register obj_reg = __ LoadConditionOperand(CMPRES1, obj, &imm);
     return Condition(reg, obj_reg, EQ, imm);
   }
 }
 
 
 Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left,
                                                        Register right,
                                                        bool needs_number_check,
                                                        intptr_t token_pos) {
-  __ TraceSimMsg("EqualityRegRegCompare");
   __ Comment("EqualityRegRegCompare");
   if (needs_number_check) {
     StubCode* stub_code = isolate()->stub_code();
     __ addiu(SP, SP, Immediate(-2 * kWordSize));
     __ sw(left, Address(SP, 1 * kWordSize));
     __ sw(right, Address(SP, 0 * kWordSize));
     if (is_optimizing()) {
       __ BranchLinkPatchable(
           &stub_code->OptimizedIdenticalWithNumberCheckLabel());
     } else {
       __ BranchLinkPatchable(
           &stub_code->UnoptimizedIdenticalWithNumberCheckLabel());
     }
     if (token_pos != Scanner::kNoSourcePos) {
       AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall,
                            Isolate::kNoDeoptId,
                            token_pos);
     }
 #if defined(DEBUG)
     if (!is_optimizing()) {
       // Do this *after* adding the pc descriptor!
       __ LoadImmediate(S4, kInvalidObjectPointer);
       __ LoadImmediate(S5, kInvalidObjectPointer);
     }
 #endif
-    __ TraceSimMsg("EqualityRegRegCompare return");
+    __ Comment("EqualityRegRegCompare return");
     // Stub returns result in CMPRES1 (if it is 0, then left and right are
     // equal).
     __ lw(right, Address(SP, 0 * kWordSize));
     __ lw(left, Address(SP, 1 * kWordSize));
     __ addiu(SP, SP, Immediate(2 * kWordSize));
     return Condition(CMPRES1, ZR, EQ);
   } else {
     return Condition(left, right, EQ);
   }
 }
 
 
 // This function must be in sync with FlowGraphCompiler::RecordSafepoint and
 // FlowGraphCompiler::SlowPathEnvironmentFor.
 void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
 #if defined(DEBUG)
   locs->CheckWritableInputs();
   ClobberDeadTempRegisters(locs);
 #endif
 
-  __ TraceSimMsg("SaveLiveRegisters");
+  __ Comment("SaveLiveRegisters");
   // TODO(vegorov): consider saving only caller save (volatile) registers.
   const intptr_t fpu_regs_count = locs->live_registers()->FpuRegisterCount();
   if (fpu_regs_count > 0) {
     __ AddImmediate(SP, -(fpu_regs_count * kFpuRegisterSize));
     // Store fpu registers with the lowest register number at the lowest
     // address.
     intptr_t offset = 0;
     for (intptr_t reg_idx = 0; reg_idx < kNumberOfFpuRegisters; ++reg_idx) {
       DRegister fpu_reg = static_cast<DRegister>(reg_idx);
       if (locs->live_registers()->ContainsFpuRegister(fpu_reg)) {
(...skipping 21 matching lines...)
@@ -1524,21 +1516,21 @@
       }
     }
     ASSERT(offset == 0);
   }
 }
 
 
 void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
   // General purpose registers have the highest register number at the
   // lowest address.
-  __ TraceSimMsg("RestoreLiveRegisters");
+  __ Comment("RestoreLiveRegisters");
   const intptr_t cpu_registers = locs->live_registers()->cpu_registers();
   ASSERT((cpu_registers & ~kAllCpuRegistersList) == 0);
   const int register_count = Utils::CountOneBits(cpu_registers);
   if (register_count > 0) {
     intptr_t offset = register_count * kWordSize;
     for (int i = 0; i < kNumberOfCpuRegisters; i++) {
       Register r = static_cast<Register>(i);
       if (locs->live_registers()->ContainsRegister(r)) {
         offset -= kWordSize;
         __ lw(r, Address(SP, offset));
(...skipping 49 matching lines...)
@@ -1594,21 +1586,20 @@
   const intptr_t len = ic_data.NumberOfChecks();
   GrowableArray<CidTarget> sorted(len);
   SortICDataByCount(ic_data, &sorted, /* drop_smi = */ false);
   ASSERT(class_id_reg != S4);
   ASSERT(len > 0);  // Why bother otherwise.
   const Array& arguments_descriptor =
       Array::ZoneHandle(ArgumentsDescriptor::New(argument_count,
                                                  argument_names));
   StubCode* stub_code = isolate()->stub_code();
 
-  __ TraceSimMsg("EmitTestAndCall");
   __ Comment("EmitTestAndCall");
   __ LoadObject(S4, arguments_descriptor);
   for (intptr_t i = 0; i < len; i++) {
     const bool is_last_check = (i == (len - 1));
     Label next_test;
     if (is_last_check) {
       __ BranchNotEqual(class_id_reg, Immediate(sorted[i].cid), deopt);
     } else {
       __ BranchNotEqual(class_id_reg, Immediate(sorted[i].cid), &next_test);
     }
(...skipping 17 matching lines...)
@@ -1632,21 +1623,21 @@
 
 
 #undef __
 #define __ compiler_->assembler()->
 
 
 void ParallelMoveResolver::EmitMove(int index) {
   MoveOperands* move = moves_[index];
   const Location source = move->src();
   const Location destination = move->dest();
-  __ TraceSimMsg("ParallelMoveResolver::EmitMove");
+  __ Comment("ParallelMoveResolver::EmitMove");
 
   if (source.IsRegister()) {
     if (destination.IsRegister()) {
       __ mov(destination.reg(), source.reg());
     } else {
       ASSERT(destination.IsStackSlot());
       const intptr_t dest_offset = destination.ToStackSlotOffset();
       __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset);
     }
   } else if (source.IsStackSlot()) {
(...skipping 134 matching lines...)
@@ -1787,28 +1778,28 @@
       moves_[i]->set_src(destination);
     } else if (other_move.Blocks(destination)) {
       moves_[i]->set_src(source);
     }
   }
 }
 
 
 void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst,
                                               const Address& src) {
-  __ TraceSimMsg("ParallelMoveResolver::MoveMemoryToMemory");
+  __ Comment("ParallelMoveResolver::MoveMemoryToMemory");
   __ lw(TMP, src);
   __ sw(TMP, dst);
 }
 
 
 void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) {
-  __ TraceSimMsg("ParallelMoveResolver::StoreObject");
+  __ Comment("ParallelMoveResolver::StoreObject");
   __ LoadObject(TMP, obj);
   __ sw(TMP, dst);
 }
 
 
 // Do not call or implement this function. Instead, use the form below that
 // uses an offset from the frame pointer instead of an Address.
 void ParallelMoveResolver::Exchange(Register reg, const Address& mem) {
   UNREACHABLE();
 }
(...skipping 23 matching lines...)
@@ -1838,41 +1829,41 @@
   ScratchRegisterScope tmp1(this, kNoRegister);
   ScratchRegisterScope tmp2(this, tmp1.reg());
   __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1);
   __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2);
   __ StoreToOffset(tmp1.reg(), base_reg1, stack_offset2);
   __ StoreToOffset(tmp2.reg(), base_reg2, stack_offset1);
 }
 
 
 void ParallelMoveResolver::SpillScratch(Register reg) {
-  __ TraceSimMsg("ParallelMoveResolver::SpillScratch");
+  __ Comment("ParallelMoveResolver::SpillScratch");
   __ Push(reg);
 }
 
 
 void ParallelMoveResolver::RestoreScratch(Register reg) {
-  __ TraceSimMsg("ParallelMoveResolver::RestoreScratch");
+  __ Comment("ParallelMoveResolver::RestoreScratch");
   __ Pop(reg);
 }
 
 
 void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) {
-  __ TraceSimMsg("ParallelMoveResolver::SpillFpuScratch");
+  __ Comment("ParallelMoveResolver::SpillFpuScratch");
   __ AddImmediate(SP, -kDoubleSize);
   __ StoreDToOffset(reg, SP, 0);
 }
 
 
 void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) {
-  __ TraceSimMsg("ParallelMoveResolver::RestoreFpuScratch");
+  __ Comment("ParallelMoveResolver::RestoreFpuScratch");
   __ LoadDFromOffset(reg, SP, 0);
   __ AddImmediate(SP, kDoubleSize);
 }
 
 
 #undef __
 
 
 }  // namespace dart
 
 #endif  // defined TARGET_ARCH_MIPS