OLD | NEW |
1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64. |
6 #if defined(TARGET_ARCH_ARM64) | 6 #if defined(TARGET_ARCH_ARM64) |
7 | 7 |
8 #include "vm/flow_graph_compiler.h" | 8 #include "vm/flow_graph_compiler.h" |
9 | 9 |
10 #include "vm/ast_printer.h" | 10 #include "vm/ast_printer.h" |
(...skipping 46 matching lines...) |
57 | 57 |
58 | 58 |
59 bool FlowGraphCompiler::SupportsHardwareDivision() { | 59 bool FlowGraphCompiler::SupportsHardwareDivision() { |
60 return true; | 60 return true; |
61 } | 61 } |
62 | 62 |
63 | 63 |
64 void FlowGraphCompiler::EnterIntrinsicMode() { | 64 void FlowGraphCompiler::EnterIntrinsicMode() { |
65 ASSERT(!intrinsic_mode()); | 65 ASSERT(!intrinsic_mode()); |
66 intrinsic_mode_ = true; | 66 intrinsic_mode_ = true; |
67 assembler()->set_constant_pool_allowed(false); | 67 ASSERT(!assembler()->constant_pool_allowed()); |
68 } | 68 } |
69 | 69 |
70 | 70 |
71 void FlowGraphCompiler::ExitIntrinsicMode() { | 71 void FlowGraphCompiler::ExitIntrinsicMode() { |
72 ASSERT(intrinsic_mode()); | 72 ASSERT(intrinsic_mode()); |
73 intrinsic_mode_ = false; | 73 intrinsic_mode_ = false; |
74 assembler()->set_constant_pool_allowed(true); | |
75 } | 74 } |
76 | 75 |
77 | 76 |
78 RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler, | 77 RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler, |
79 DeoptInfoBuilder* builder, | 78 DeoptInfoBuilder* builder, |
80 const Array& deopt_table) { | 79 const Array& deopt_table) { |
81 if (deopt_env_ == NULL) { | 80 if (deopt_env_ == NULL) { |
82 ++builder->current_info_number_; | 81 ++builder->current_info_number_; |
83 return TypedData::null(); | 82 return TypedData::null(); |
84 } | 83 } |
(...skipping 94 matching lines...) |
179 Assembler* assem = compiler->assembler(); | 178 Assembler* assem = compiler->assembler(); |
180 #define __ assem-> | 179 #define __ assem-> |
181 __ Comment("%s", Name()); | 180 __ Comment("%s", Name()); |
182 __ Bind(entry_label()); | 181 __ Bind(entry_label()); |
183 if (FLAG_trap_on_deoptimization) { | 182 if (FLAG_trap_on_deoptimization) { |
184 __ brk(0); | 183 __ brk(0); |
185 } | 184 } |
186 | 185 |
187 ASSERT(deopt_env() != NULL); | 186 ASSERT(deopt_env() != NULL); |
188 | 187 |
189 __ BranchLink(&StubCode::DeoptimizeLabel(), PP); | 188 __ BranchLink(&StubCode::DeoptimizeLabel()); |
190 set_pc_offset(assem->CodeSize()); | 189 set_pc_offset(assem->CodeSize()); |
191 #undef __ | 190 #undef __ |
192 } | 191 } |
193 | 192 |
194 | 193 |
195 #define __ assembler()-> | 194 #define __ assembler()-> |
196 | 195 |
197 | 196 |
198 // Fall through if bool_register contains null. | 197 // Fall through if bool_register contains null. |
199 void FlowGraphCompiler::GenerateBoolToJump(Register bool_register, | 198 void FlowGraphCompiler::GenerateBoolToJump(Register bool_register, |
200 Label* is_true, | 199 Label* is_true, |
201 Label* is_false) { | 200 Label* is_false) { |
202 Label fall_through; | 201 Label fall_through; |
203 __ CompareObject(bool_register, Object::null_object(), PP); | 202 __ CompareObject(bool_register, Object::null_object()); |
204 __ b(&fall_through, EQ); | 203 __ b(&fall_through, EQ); |
205 __ CompareObject(bool_register, Bool::True(), PP); | 204 __ CompareObject(bool_register, Bool::True()); |
206 __ b(is_true, EQ); | 205 __ b(is_true, EQ); |
207 __ b(is_false); | 206 __ b(is_false); |
208 __ Bind(&fall_through); | 207 __ Bind(&fall_through); |
209 } | 208 } |
210 | 209 |
211 | 210 |
212 // R0: instance (must be preserved). | 211 // R0: instance (must be preserved). |
213 // R1: instantiator type arguments (if used). | 212 // R1: instantiator type arguments (if used). |
214 RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub( | 213 RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub( |
215 TypeTestStubKind test_kind, | 214 TypeTestStubKind test_kind, |
216 Register instance_reg, | 215 Register instance_reg, |
217 Register type_arguments_reg, | 216 Register type_arguments_reg, |
218 Register temp_reg, | 217 Register temp_reg, |
219 Label* is_instance_lbl, | 218 Label* is_instance_lbl, |
220 Label* is_not_instance_lbl) { | 219 Label* is_not_instance_lbl) { |
221 ASSERT(instance_reg == R0); | 220 ASSERT(instance_reg == R0); |
222 ASSERT(temp_reg == kNoRegister); // Unused on ARM. | 221 ASSERT(temp_reg == kNoRegister); // Unused on ARM. |
223 const SubtypeTestCache& type_test_cache = | 222 const SubtypeTestCache& type_test_cache = |
224 SubtypeTestCache::ZoneHandle(SubtypeTestCache::New()); | 223 SubtypeTestCache::ZoneHandle(SubtypeTestCache::New()); |
225 __ LoadUniqueObject(R2, type_test_cache, PP); | 224 __ LoadUniqueObject(R2, type_test_cache); |
226 if (test_kind == kTestTypeOneArg) { | 225 if (test_kind == kTestTypeOneArg) { |
227 ASSERT(type_arguments_reg == kNoRegister); | 226 ASSERT(type_arguments_reg == kNoRegister); |
228 __ LoadObject(R1, Object::null_object(), PP); | 227 __ LoadObject(R1, Object::null_object()); |
229 __ BranchLink(&StubCode::Subtype1TestCacheLabel(), PP); | 228 __ BranchLink(&StubCode::Subtype1TestCacheLabel()); |
230 } else if (test_kind == kTestTypeTwoArgs) { | 229 } else if (test_kind == kTestTypeTwoArgs) { |
231 ASSERT(type_arguments_reg == kNoRegister); | 230 ASSERT(type_arguments_reg == kNoRegister); |
232 __ LoadObject(R1, Object::null_object(), PP); | 231 __ LoadObject(R1, Object::null_object()); |
233 __ BranchLink(&StubCode::Subtype2TestCacheLabel(), PP); | 232 __ BranchLink(&StubCode::Subtype2TestCacheLabel()); |
234 } else if (test_kind == kTestTypeThreeArgs) { | 233 } else if (test_kind == kTestTypeThreeArgs) { |
235 ASSERT(type_arguments_reg == R1); | 234 ASSERT(type_arguments_reg == R1); |
236 __ BranchLink(&StubCode::Subtype3TestCacheLabel(), PP); | 235 __ BranchLink(&StubCode::Subtype3TestCacheLabel()); |
237 } else { | 236 } else { |
238 UNREACHABLE(); | 237 UNREACHABLE(); |
239 } | 238 } |
240 // Result is in R1: null -> not found, otherwise Bool::True or Bool::False. | 239 // Result is in R1: null -> not found, otherwise Bool::True or Bool::False. |
241 GenerateBoolToJump(R1, is_instance_lbl, is_not_instance_lbl); | 240 GenerateBoolToJump(R1, is_instance_lbl, is_not_instance_lbl); |
242 return type_test_cache.raw(); | 241 return type_test_cache.raw(); |
243 } | 242 } |
244 | 243 |
245 | 244 |
246 // Jumps to labels 'is_instance' or 'is_not_instance' respectively, if | 245 // Jumps to labels 'is_instance' or 'is_not_instance' respectively, if |
(...skipping 28 matching lines...) |
275 const intptr_t from_index = num_type_args - num_type_params; | 274 const intptr_t from_index = num_type_args - num_type_params; |
276 const TypeArguments& type_arguments = | 275 const TypeArguments& type_arguments = |
277 TypeArguments::ZoneHandle(type.arguments()); | 276 TypeArguments::ZoneHandle(type.arguments()); |
278 const bool is_raw_type = type_arguments.IsNull() || | 277 const bool is_raw_type = type_arguments.IsNull() || |
279 type_arguments.IsRaw(from_index, num_type_params); | 278 type_arguments.IsRaw(from_index, num_type_params); |
280 // Signature class is an instantiated parameterized type. | 279 // Signature class is an instantiated parameterized type. |
281 if (!type_class.IsSignatureClass()) { | 280 if (!type_class.IsSignatureClass()) { |
282 if (is_raw_type) { | 281 if (is_raw_type) { |
283 const Register kClassIdReg = R2; | 282 const Register kClassIdReg = R2; |
284 // dynamic type argument, check only classes. | 283 // dynamic type argument, check only classes. |
285 __ LoadClassId(kClassIdReg, kInstanceReg, PP); | 284 __ LoadClassId(kClassIdReg, kInstanceReg); |
286 __ CompareImmediate(kClassIdReg, type_class.id(), PP); | 285 __ CompareImmediate(kClassIdReg, type_class.id()); |
287 __ b(is_instance_lbl, EQ); | 286 __ b(is_instance_lbl, EQ); |
288 // List is a very common case. | 287 // List is a very common case. |
289 if (IsListClass(type_class)) { | 288 if (IsListClass(type_class)) { |
290 GenerateListTypeCheck(kClassIdReg, is_instance_lbl); | 289 GenerateListTypeCheck(kClassIdReg, is_instance_lbl); |
291 } | 290 } |
292 return GenerateSubtype1TestCacheLookup( | 291 return GenerateSubtype1TestCacheLookup( |
293 token_pos, type_class, is_instance_lbl, is_not_instance_lbl); | 292 token_pos, type_class, is_instance_lbl, is_not_instance_lbl); |
294 } | 293 } |
295 // If one type argument only, check if type argument is Object or dynamic. | 294 // If one type argument only, check if type argument is Object or dynamic. |
296 if (type_arguments.Length() == 1) { | 295 if (type_arguments.Length() == 1) { |
(...skipping 23 matching lines...) |
320 is_instance_lbl, | 319 is_instance_lbl, |
321 is_not_instance_lbl); | 320 is_not_instance_lbl); |
322 } | 321 } |
323 | 322 |
324 | 323 |
325 void FlowGraphCompiler::CheckClassIds(Register class_id_reg, | 324 void FlowGraphCompiler::CheckClassIds(Register class_id_reg, |
326 const GrowableArray<intptr_t>& class_ids, | 325 const GrowableArray<intptr_t>& class_ids, |
327 Label* is_equal_lbl, | 326 Label* is_equal_lbl, |
328 Label* is_not_equal_lbl) { | 327 Label* is_not_equal_lbl) { |
329 for (intptr_t i = 0; i < class_ids.length(); i++) { | 328 for (intptr_t i = 0; i < class_ids.length(); i++) { |
330 __ CompareImmediate(class_id_reg, class_ids[i], PP); | 329 __ CompareImmediate(class_id_reg, class_ids[i]); |
331 __ b(is_equal_lbl, EQ); | 330 __ b(is_equal_lbl, EQ); |
332 } | 331 } |
333 __ b(is_not_equal_lbl); | 332 __ b(is_not_equal_lbl); |
334 } | 333 } |
335 | 334 |
336 | 335 |
337 // Testing against an instantiated type with no arguments, without | 336 // Testing against an instantiated type with no arguments, without |
338 // SubtypeTestCache. | 337 // SubtypeTestCache. |
339 // R0: instance being type checked (preserved). | 338 // R0: instance being type checked (preserved). |
340 // Clobbers R2, R3. | 339 // Clobbers R2, R3. |
(...skipping 15 matching lines...) |
356 if (smi_class.IsSubtypeOf(TypeArguments::Handle(), | 355 if (smi_class.IsSubtypeOf(TypeArguments::Handle(), |
357 type_class, | 356 type_class, |
358 TypeArguments::Handle(), | 357 TypeArguments::Handle(), |
359 NULL)) { | 358 NULL)) { |
360 __ b(is_instance_lbl, EQ); | 359 __ b(is_instance_lbl, EQ); |
361 } else { | 360 } else { |
362 __ b(is_not_instance_lbl, EQ); | 361 __ b(is_not_instance_lbl, EQ); |
363 } | 362 } |
364 // Compare if the classes are equal. | 363 // Compare if the classes are equal. |
365 const Register kClassIdReg = R2; | 364 const Register kClassIdReg = R2; |
366 __ LoadClassId(kClassIdReg, kInstanceReg, PP); | 365 __ LoadClassId(kClassIdReg, kInstanceReg); |
367 __ CompareImmediate(kClassIdReg, type_class.id(), PP); | 366 __ CompareImmediate(kClassIdReg, type_class.id()); |
368 __ b(is_instance_lbl, EQ); | 367 __ b(is_instance_lbl, EQ); |
369 // See ClassFinalizer::ResolveSuperTypeAndInterfaces for list of restricted | 368 // See ClassFinalizer::ResolveSuperTypeAndInterfaces for list of restricted |
370 // interfaces. | 369 // interfaces. |
371 // Bool interface can be implemented only by core class Bool. | 370 // Bool interface can be implemented only by core class Bool. |
372 if (type.IsBoolType()) { | 371 if (type.IsBoolType()) { |
373 __ CompareImmediate(kClassIdReg, kBoolCid, PP); | 372 __ CompareImmediate(kClassIdReg, kBoolCid); |
374 __ b(is_instance_lbl, EQ); | 373 __ b(is_instance_lbl, EQ); |
375 __ b(is_not_instance_lbl); | 374 __ b(is_not_instance_lbl); |
376 return false; | 375 return false; |
377 } | 376 } |
378 if (type.IsFunctionType()) { | 377 if (type.IsFunctionType()) { |
379 // Check if instance is a closure. | 378 // Check if instance is a closure. |
380 __ LoadClassById(R3, kClassIdReg, PP); | 379 __ LoadClassById(R3, kClassIdReg); |
381 __ LoadFieldFromOffset(R3, R3, Class::signature_function_offset(), PP); | 380 __ LoadFieldFromOffset(R3, R3, Class::signature_function_offset()); |
382 __ CompareObject(R3, Object::null_object(), PP); | 381 __ CompareObject(R3, Object::null_object()); |
383 __ b(is_instance_lbl, NE); | 382 __ b(is_instance_lbl, NE); |
384 } | 383 } |
385 // Custom checking for numbers (Smi, Mint, Bigint and Double). | 384 // Custom checking for numbers (Smi, Mint, Bigint and Double). |
386 // Note that instance is not Smi (checked above). | 385 // Note that instance is not Smi (checked above). |
387 if (type.IsSubtypeOf(Type::Handle(Type::Number()), NULL)) { | 386 if (type.IsSubtypeOf(Type::Handle(Type::Number()), NULL)) { |
388 GenerateNumberTypeCheck( | 387 GenerateNumberTypeCheck( |
389 kClassIdReg, type, is_instance_lbl, is_not_instance_lbl); | 388 kClassIdReg, type, is_instance_lbl, is_not_instance_lbl); |
390 return false; | 389 return false; |
391 } | 390 } |
392 if (type.IsStringType()) { | 391 if (type.IsStringType()) { |
(...skipping 12 matching lines...) |
405 // TODO(srdjan): Implement a quicker subtype check, as type test | 404 // TODO(srdjan): Implement a quicker subtype check, as type test |
406 // arrays can grow too high, but they may be useful when optimizing | 405 // arrays can grow too high, but they may be useful when optimizing |
407 // code (type-feedback). | 406 // code (type-feedback). |
408 RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup( | 407 RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup( |
409 intptr_t token_pos, | 408 intptr_t token_pos, |
410 const Class& type_class, | 409 const Class& type_class, |
411 Label* is_instance_lbl, | 410 Label* is_instance_lbl, |
412 Label* is_not_instance_lbl) { | 411 Label* is_not_instance_lbl) { |
413 __ Comment("Subtype1TestCacheLookup"); | 412 __ Comment("Subtype1TestCacheLookup"); |
414 const Register kInstanceReg = R0; | 413 const Register kInstanceReg = R0; |
415 __ LoadClass(R1, kInstanceReg, PP); | 414 __ LoadClass(R1, kInstanceReg); |
416 // R1: instance class. | 415 // R1: instance class. |
417 // Check immediate superclass equality. | 416 // Check immediate superclass equality. |
418 __ LoadFieldFromOffset(R2, R1, Class::super_type_offset(), PP); | 417 __ LoadFieldFromOffset(R2, R1, Class::super_type_offset()); |
419 __ LoadFieldFromOffset(R2, R2, Type::type_class_offset(), PP); | 418 __ LoadFieldFromOffset(R2, R2, Type::type_class_offset()); |
420 __ CompareObject(R2, type_class, PP); | 419 __ CompareObject(R2, type_class); |
421 __ b(is_instance_lbl, EQ); | 420 __ b(is_instance_lbl, EQ); |
422 | 421 |
423 const Register kTypeArgumentsReg = kNoRegister; | 422 const Register kTypeArgumentsReg = kNoRegister; |
424 const Register kTempReg = kNoRegister; | 423 const Register kTempReg = kNoRegister; |
425 return GenerateCallSubtypeTestStub(kTestTypeOneArg, | 424 return GenerateCallSubtypeTestStub(kTestTypeOneArg, |
426 kInstanceReg, | 425 kInstanceReg, |
427 kTypeArgumentsReg, | 426 kTypeArgumentsReg, |
428 kTempReg, | 427 kTempReg, |
429 is_instance_lbl, | 428 is_instance_lbl, |
430 is_not_instance_lbl); | 429 is_not_instance_lbl); |
431 } | 430 } |
432 | 431 |
433 | 432 |
434 // Generates inlined check if 'type' is a type parameter or type itself | 433 // Generates inlined check if 'type' is a type parameter or type itself |
435 // R0: instance (preserved). | 434 // R0: instance (preserved). |
436 RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest( | 435 RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest( |
437 intptr_t token_pos, | 436 intptr_t token_pos, |
438 const AbstractType& type, | 437 const AbstractType& type, |
439 Label* is_instance_lbl, | 438 Label* is_instance_lbl, |
440 Label* is_not_instance_lbl) { | 439 Label* is_not_instance_lbl) { |
441 __ Comment("UninstantiatedTypeTest"); | 440 __ Comment("UninstantiatedTypeTest"); |
442 ASSERT(!type.IsInstantiated()); | 441 ASSERT(!type.IsInstantiated()); |
443 // Skip check if destination is a dynamic type. | 442 // Skip check if destination is a dynamic type. |
444 if (type.IsTypeParameter()) { | 443 if (type.IsTypeParameter()) { |
445 const TypeParameter& type_param = TypeParameter::Cast(type); | 444 const TypeParameter& type_param = TypeParameter::Cast(type); |
446 // Load instantiator (or null) and instantiator type arguments on stack. | 445 // Load instantiator (or null) and instantiator type arguments on stack. |
447 __ ldr(R1, Address(SP)); // Get instantiator type arguments. | 446 __ ldr(R1, Address(SP)); // Get instantiator type arguments. |
448 // R1: instantiator type arguments. | 447 // R1: instantiator type arguments. |
449 // Check if type arguments are null, i.e. equivalent to vector of dynamic. | 448 // Check if type arguments are null, i.e. equivalent to vector of dynamic. |
450 __ CompareObject(R1, Object::null_object(), PP); | 449 __ CompareObject(R1, Object::null_object()); |
451 __ b(is_instance_lbl, EQ); | 450 __ b(is_instance_lbl, EQ); |
452 __ LoadFieldFromOffset( | 451 __ LoadFieldFromOffset( |
453 R2, R1, TypeArguments::type_at_offset(type_param.index()), PP); | 452 R2, R1, TypeArguments::type_at_offset(type_param.index())); |
454 // R2: concrete type of type. | 453 // R2: concrete type of type. |
455 // Check if type argument is dynamic. | 454 // Check if type argument is dynamic. |
456 __ CompareObject(R2, Type::ZoneHandle(Type::DynamicType()), PP); | 455 __ CompareObject(R2, Type::ZoneHandle(Type::DynamicType())); |
457 __ b(is_instance_lbl, EQ); | 456 __ b(is_instance_lbl, EQ); |
458 __ CompareObject(R2, Type::ZoneHandle(Type::ObjectType()), PP); | 457 __ CompareObject(R2, Type::ZoneHandle(Type::ObjectType())); |
459 __ b(is_instance_lbl, EQ); | 458 __ b(is_instance_lbl, EQ); |
460 | 459 |
461 // For Smi check quickly against int and num interfaces. | 460 // For Smi check quickly against int and num interfaces. |
462 Label not_smi; | 461 Label not_smi; |
463 __ tsti(R0, Immediate(kSmiTagMask)); // Value is Smi? | 462 __ tsti(R0, Immediate(kSmiTagMask)); // Value is Smi? |
464 __ b(&not_smi, NE); | 463 __ b(&not_smi, NE); |
465 __ CompareObject(R2, Type::ZoneHandle(Type::IntType()), PP); | 464 __ CompareObject(R2, Type::ZoneHandle(Type::IntType())); |
466 __ b(is_instance_lbl, EQ); | 465 __ b(is_instance_lbl, EQ); |
467 __ CompareObject(R2, Type::ZoneHandle(Type::Number()), PP); | 466 __ CompareObject(R2, Type::ZoneHandle(Type::Number())); |
468 __ b(is_instance_lbl, EQ); | 467 __ b(is_instance_lbl, EQ); |
469 // Smi must be handled in runtime. | 468 // Smi must be handled in runtime. |
470 Label fall_through; | 469 Label fall_through; |
471 __ b(&fall_through); | 470 __ b(&fall_through); |
472 | 471 |
473 __ Bind(&not_smi); | 472 __ Bind(&not_smi); |
474 // R1: instantiator type arguments. | 473 // R1: instantiator type arguments. |
475 // R0: instance. | 474 // R0: instance. |
476 const Register kInstanceReg = R0; | 475 const Register kInstanceReg = R0; |
477 const Register kTypeArgumentsReg = R1; | 476 const Register kTypeArgumentsReg = R1; |
(...skipping 108 matching lines...) |
586 // If type is instantiated and non-parameterized, we can inline code | 585 // If type is instantiated and non-parameterized, we can inline code |
587 // checking whether the tested instance is a Smi. | 586 // checking whether the tested instance is a Smi. |
588 if (type.IsInstantiated()) { | 587 if (type.IsInstantiated()) { |
589 // A null object is only an instance of Object and dynamic, which has | 588 // A null object is only an instance of Object and dynamic, which has |
590 // already been checked above (if the type is instantiated). So we can | 589 // already been checked above (if the type is instantiated). So we can |
591 // return false here if the instance is null (and if the type is | 590 // return false here if the instance is null (and if the type is |
592 // instantiated). | 591 // instantiated). |
593 // We can only inline this null check if the type is instantiated at compile | 592 // We can only inline this null check if the type is instantiated at compile |
594 // time, since an uninstantiated type at compile time could be Object or | 593 // time, since an uninstantiated type at compile time could be Object or |
595 // dynamic at run time. | 594 // dynamic at run time. |
596 __ CompareObject(R0, Object::null_object(), PP); | 595 __ CompareObject(R0, Object::null_object()); |
597 __ b(type.IsNullType() ? &is_instance : &is_not_instance, EQ); | 596 __ b(type.IsNullType() ? &is_instance : &is_not_instance, EQ); |
598 } | 597 } |
599 | 598 |
600 // Generate inline instanceof test. | 599 // Generate inline instanceof test. |
601 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(); | 600 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(); |
602 test_cache = GenerateInlineInstanceof(token_pos, type, | 601 test_cache = GenerateInlineInstanceof(token_pos, type, |
603 &is_instance, &is_not_instance); | 602 &is_instance, &is_not_instance); |
604 | 603 |
605 // test_cache is null if there is no fall-through. | 604 // test_cache is null if there is no fall-through. |
606 Label done; | 605 Label done; |
607 if (!test_cache.IsNull()) { | 606 if (!test_cache.IsNull()) { |
608 // Generate runtime call. | 607 // Generate runtime call. |
609 // Load instantiator (R2) and its type arguments (R1). | 608 // Load instantiator (R2) and its type arguments (R1). |
610 __ ldr(R1, Address(SP, 0 * kWordSize)); | 609 __ ldr(R1, Address(SP, 0 * kWordSize)); |
611 __ ldr(R2, Address(SP, 1 * kWordSize)); | 610 __ ldr(R2, Address(SP, 1 * kWordSize)); |
612 __ PushObject(Object::null_object(), PP); // Make room for the result. | 611 __ PushObject(Object::null_object()); // Make room for the result. |
613 __ Push(R0); // Push the instance. | 612 __ Push(R0); // Push the instance. |
614 __ PushObject(type, PP); // Push the type. | 613 __ PushObject(type); // Push the type. |
615 // Push instantiator (R2) and its type arguments (R1). | 614 // Push instantiator (R2) and its type arguments (R1). |
616 __ Push(R2); | 615 __ Push(R2); |
617 __ Push(R1); | 616 __ Push(R1); |
618 __ LoadUniqueObject(R0, test_cache, PP); | 617 __ LoadUniqueObject(R0, test_cache); |
619 __ Push(R0); | 618 __ Push(R0); |
620 GenerateRuntimeCall(token_pos, deopt_id, kInstanceofRuntimeEntry, 5, locs); | 619 GenerateRuntimeCall(token_pos, deopt_id, kInstanceofRuntimeEntry, 5, locs); |
621 // Pop the parameters supplied to the runtime entry. The result of the | 620 // Pop the parameters supplied to the runtime entry. The result of the |
622 // instanceof runtime call will be left as the result of the operation. | 621 // instanceof runtime call will be left as the result of the operation. |
623 __ Drop(5); | 622 __ Drop(5); |
624 if (negate_result) { | 623 if (negate_result) { |
625 __ Pop(R1); | 624 __ Pop(R1); |
626 __ LoadObject(R0, Bool::True(), PP); | 625 __ LoadObject(R0, Bool::True()); |
627 __ CompareRegisters(R1, R0); | 626 __ CompareRegisters(R1, R0); |
628 __ b(&done, NE); | 627 __ b(&done, NE); |
629 __ LoadObject(R0, Bool::False(), PP); | 628 __ LoadObject(R0, Bool::False()); |
630 } else { | 629 } else { |
631 __ Pop(R0); | 630 __ Pop(R0); |
632 } | 631 } |
633 __ b(&done); | 632 __ b(&done); |
634 } | 633 } |
635 __ Bind(&is_not_instance); | 634 __ Bind(&is_not_instance); |
636 __ LoadObject(R0, Bool::Get(negate_result), PP); | 635 __ LoadObject(R0, Bool::Get(negate_result)); |
637 __ b(&done); | 636 __ b(&done); |
638 | 637 |
639 __ Bind(&is_instance); | 638 __ Bind(&is_instance); |
640 __ LoadObject(R0, Bool::Get(!negate_result), PP); | 639 __ LoadObject(R0, Bool::Get(!negate_result)); |
641 __ Bind(&done); | 640 __ Bind(&done); |
642 // Remove instantiator (R2) and its type arguments (R1). | 641 // Remove instantiator (R2) and its type arguments (R1). |
643 __ Drop(2); | 642 __ Drop(2); |
644 } | 643 } |
645 | 644 |
646 | 645 |
647 // Optimize assignable type check by adding inlined tests for: | 646 // Optimize assignable type check by adding inlined tests for: |
648 // - NULL -> return NULL. | 647 // - NULL -> return NULL. |
649 // - Smi -> compile time subtype check (only if dst class is not parameterized). | 648 // - Smi -> compile time subtype check (only if dst class is not parameterized). |
650 // - Class equality (only if class is not parameterized). | 649 // - Class equality (only if class is not parameterized). |
(...skipping 14 matching lines...) |
665 ASSERT(!dst_type.IsNull()); | 664 ASSERT(!dst_type.IsNull()); |
666 ASSERT(dst_type.IsFinalized()); | 665 ASSERT(dst_type.IsFinalized()); |
667 // Assignable check is skipped in FlowGraphBuilder, not here. | 666 // Assignable check is skipped in FlowGraphBuilder, not here. |
668 ASSERT(dst_type.IsMalformedOrMalbounded() || | 667 ASSERT(dst_type.IsMalformedOrMalbounded() || |
669 (!dst_type.IsDynamicType() && !dst_type.IsObjectType())); | 668 (!dst_type.IsDynamicType() && !dst_type.IsObjectType())); |
670 // Preserve instantiator (R2) and its type arguments (R1). | 669 // Preserve instantiator (R2) and its type arguments (R1). |
671 __ Push(R2); | 670 __ Push(R2); |
672 __ Push(R1); | 671 __ Push(R1); |
673 // A null object is always assignable and is returned as result. | 672 // A null object is always assignable and is returned as result. |
674 Label is_assignable, runtime_call; | 673 Label is_assignable, runtime_call; |
675 __ CompareObject(R0, Object::null_object(), PP); | 674 __ CompareObject(R0, Object::null_object()); |
676 __ b(&is_assignable, EQ); | 675 __ b(&is_assignable, EQ); |
677 | 676 |
678 // Generate throw new TypeError() if the type is malformed or malbounded. | 677 // Generate throw new TypeError() if the type is malformed or malbounded. |
679 if (dst_type.IsMalformedOrMalbounded()) { | 678 if (dst_type.IsMalformedOrMalbounded()) { |
680 __ PushObject(Object::null_object(), PP); // Make room for the result. | 679 __ PushObject(Object::null_object()); // Make room for the result. |
681 __ Push(R0); // Push the source object. | 680 __ Push(R0); // Push the source object. |
682 __ PushObject(dst_name, PP); // Push the name of the destination. | 681 __ PushObject(dst_name); // Push the name of the destination. |
683 __ PushObject(dst_type, PP); // Push the type of the destination. | 682 __ PushObject(dst_type); // Push the type of the destination. |
684 GenerateRuntimeCall(token_pos, | 683 GenerateRuntimeCall(token_pos, |
685 deopt_id, | 684 deopt_id, |
686 kBadTypeErrorRuntimeEntry, | 685 kBadTypeErrorRuntimeEntry, |
687 3, | 686 3, |
688 locs); | 687 locs); |
689 // We should never return here. | 688 // We should never return here. |
690 __ brk(0); | 689 __ brk(0); |
691 | 690 |
692 __ Bind(&is_assignable); // For a null object. | 691 __ Bind(&is_assignable); // For a null object. |
693 // Restore instantiator (R2) and its type arguments (R1). | 692 // Restore instantiator (R2) and its type arguments (R1). |
694 __ Pop(R1); | 693 __ Pop(R1); |
695 __ Pop(R2); | 694 __ Pop(R2); |
696 return; | 695 return; |
697 } | 696 } |
698 | 697 |
699 // Generate inline type check, linking to runtime call if not assignable. | 698 // Generate inline type check, linking to runtime call if not assignable. |
700 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(); | 699 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(); |
701 test_cache = GenerateInlineInstanceof(token_pos, dst_type, | 700 test_cache = GenerateInlineInstanceof(token_pos, dst_type, |
702 &is_assignable, &runtime_call); | 701 &is_assignable, &runtime_call); |
703 | 702 |
704 __ Bind(&runtime_call); | 703 __ Bind(&runtime_call); |
705 // Load instantiator (R2) and its type arguments (R1). | 704 // Load instantiator (R2) and its type arguments (R1). |
706 __ ldr(R1, Address(SP)); | 705 __ ldr(R1, Address(SP)); |
707 __ ldr(R2, Address(SP, 1 * kWordSize)); | 706 __ ldr(R2, Address(SP, 1 * kWordSize)); |
708 __ PushObject(Object::null_object(), PP); // Make room for the result. | 707 __ PushObject(Object::null_object()); // Make room for the result. |
709 __ Push(R0); // Push the source object. | 708 __ Push(R0); // Push the source object. |
710 __ PushObject(dst_type, PP); // Push the type of the destination. | 709 __ PushObject(dst_type); // Push the type of the destination. |
711 // Push instantiator (R2) and its type arguments (R1). | 710 // Push instantiator (R2) and its type arguments (R1). |
712 __ Push(R2); | 711 __ Push(R2); |
713 __ Push(R1); | 712 __ Push(R1); |
714 __ PushObject(dst_name, PP); // Push the name of the destination. | 713 __ PushObject(dst_name); // Push the name of the destination. |
715 __ LoadUniqueObject(R0, test_cache, PP); | 714 __ LoadUniqueObject(R0, test_cache); |
716 __ Push(R0); | 715 __ Push(R0); |
717 GenerateRuntimeCall(token_pos, deopt_id, kTypeCheckRuntimeEntry, 6, locs); | 716 GenerateRuntimeCall(token_pos, deopt_id, kTypeCheckRuntimeEntry, 6, locs); |
718 // Pop the parameters supplied to the runtime entry. The result of the | 717 // Pop the parameters supplied to the runtime entry. The result of the |
719 // type check runtime call is the checked value. | 718 // type check runtime call is the checked value. |
720 __ Drop(6); | 719 __ Drop(6); |
721 __ Pop(R0); | 720 __ Pop(R0); |
722 | 721 |
723 __ Bind(&is_assignable); | 722 __ Bind(&is_assignable); |
724 // Restore instantiator (R2) and its type arguments (R1). | 723 // Restore instantiator (R2) and its type arguments (R1). |
725 __ Pop(R1); | 724 __ Pop(R1); |
(...skipping 25 matching lines...) |
751 num_fixed_params + num_opt_pos_params + num_opt_named_params; | 750 num_fixed_params + num_opt_pos_params + num_opt_named_params; |
752 ASSERT(function.NumParameters() == num_params); | 751 ASSERT(function.NumParameters() == num_params); |
753 ASSERT(parsed_function().first_parameter_index() == kFirstLocalSlotFromFp); | 752 ASSERT(parsed_function().first_parameter_index() == kFirstLocalSlotFromFp); |
754 | 753 |
755 // Check that min_num_pos_args <= num_pos_args <= max_num_pos_args, | 754 // Check that min_num_pos_args <= num_pos_args <= max_num_pos_args, |
756 // where num_pos_args is the number of positional arguments passed in. | 755 // where num_pos_args is the number of positional arguments passed in. |
757 const int min_num_pos_args = num_fixed_params; | 756 const int min_num_pos_args = num_fixed_params; |
758 const int max_num_pos_args = num_fixed_params + num_opt_pos_params; | 757 const int max_num_pos_args = num_fixed_params + num_opt_pos_params; |
759 | 758 |
760 __ LoadFieldFromOffset( | 759 __ LoadFieldFromOffset( |
761 R8, R4, ArgumentsDescriptor::positional_count_offset(), PP); | 760 R8, R4, ArgumentsDescriptor::positional_count_offset()); |
762 // Check that min_num_pos_args <= num_pos_args. | 761 // Check that min_num_pos_args <= num_pos_args. |
763 Label wrong_num_arguments; | 762 Label wrong_num_arguments; |
764 __ CompareImmediate(R8, Smi::RawValue(min_num_pos_args), PP); | 763 __ CompareImmediate(R8, Smi::RawValue(min_num_pos_args)); |
765 __ b(&wrong_num_arguments, LT); | 764 __ b(&wrong_num_arguments, LT); |
766 // Check that num_pos_args <= max_num_pos_args. | 765 // Check that num_pos_args <= max_num_pos_args. |
767 __ CompareImmediate(R8, Smi::RawValue(max_num_pos_args), PP); | 766 __ CompareImmediate(R8, Smi::RawValue(max_num_pos_args)); |
768 __ b(&wrong_num_arguments, GT); | 767 __ b(&wrong_num_arguments, GT); |
769 | 768 |
770 // Copy positional arguments. | 769 // Copy positional arguments. |
771 // Argument i passed at fp[kParamEndSlotFromFp + num_args - i] is copied | 770 // Argument i passed at fp[kParamEndSlotFromFp + num_args - i] is copied |
772 // to fp[kFirstLocalSlotFromFp - i]. | 771 // to fp[kFirstLocalSlotFromFp - i]. |
773 | 772 |
774 __ LoadFieldFromOffset(R7, R4, ArgumentsDescriptor::count_offset(), PP); | 773 __ LoadFieldFromOffset(R7, R4, ArgumentsDescriptor::count_offset()); |
775 // Since R7 and R8 are Smi, use LSL 2 instead of LSL 3. | 774 // Since R7 and R8 are Smi, use LSL 2 instead of LSL 3. |
776 // Let R7 point to the last passed positional argument, i.e. to | 775 // Let R7 point to the last passed positional argument, i.e. to |
777 // fp[kParamEndSlotFromFp + num_args - (num_pos_args - 1)]. | 776 // fp[kParamEndSlotFromFp + num_args - (num_pos_args - 1)]. |
778 __ sub(R7, R7, Operand(R8)); | 777 __ sub(R7, R7, Operand(R8)); |
779 __ add(R7, FP, Operand(R7, LSL, 2)); | 778 __ add(R7, FP, Operand(R7, LSL, 2)); |
780 __ add(R7, R7, Operand((kParamEndSlotFromFp + 1) * kWordSize)); | 779 __ add(R7, R7, Operand((kParamEndSlotFromFp + 1) * kWordSize)); |
781 | 780 |
782 // Let R6 point to the last copied positional argument, i.e. to | 781 // Let R6 point to the last copied positional argument, i.e. to |
783 // fp[kFirstLocalSlotFromFp - (num_pos_args - 1)]. | 782 // fp[kFirstLocalSlotFromFp - (num_pos_args - 1)]. |
784 __ AddImmediate(R6, FP, (kFirstLocalSlotFromFp + 1) * kWordSize, PP); | 783 __ AddImmediate(R6, FP, (kFirstLocalSlotFromFp + 1) * kWordSize); |
785 __ sub(R6, R6, Operand(R8, LSL, 2)); // R8 is a Smi. | 784 __ sub(R6, R6, Operand(R8, LSL, 2)); // R8 is a Smi. |
786 __ SmiUntag(R8); | 785 __ SmiUntag(R8); |
787 Label loop, loop_condition; | 786 Label loop, loop_condition; |
788 __ b(&loop_condition); | 787 __ b(&loop_condition); |
789 // We do not use the final allocation index of the variable here, i.e. | 788 // We do not use the final allocation index of the variable here, i.e. |
790 // scope->VariableAt(i)->index(), because captured variables still need | 789 // scope->VariableAt(i)->index(), because captured variables still need |
791 // to be copied to the context that is not yet allocated. | 790 // to be copied to the context that is not yet allocated. |
792 const Address argument_addr(R7, R8, UXTX, Address::Scaled); | 791 const Address argument_addr(R7, R8, UXTX, Address::Scaled); |
793 const Address copy_addr(R6, R8, UXTX, Address::Scaled); | 792 const Address copy_addr(R6, R8, UXTX, Address::Scaled); |
794 __ Bind(&loop); | 793 __ Bind(&loop); |
(...skipping 23 matching lines...) |
818 const intptr_t result = opt_param_name.CompareTo(param_i->name()); | 817 const intptr_t result = opt_param_name.CompareTo(param_i->name()); |
819 ASSERT(result != 0); | 818 ASSERT(result != 0); |
820 if (result > 0) break; | 819 if (result > 0) break; |
821 opt_param[i + 1] = opt_param[i]; | 820 opt_param[i + 1] = opt_param[i]; |
822 opt_param_position[i + 1] = opt_param_position[i]; | 821 opt_param_position[i + 1] = opt_param_position[i]; |
823 } | 822 } |
824 opt_param[i + 1] = parameter; | 823 opt_param[i + 1] = parameter; |
825 opt_param_position[i + 1] = pos; | 824 opt_param_position[i + 1] = pos; |
826 } | 825 } |
827 // Generate code handling each optional parameter in alphabetical order. | 826 // Generate code handling each optional parameter in alphabetical order. |
828 __ LoadFieldFromOffset(R7, R4, ArgumentsDescriptor::count_offset(), PP); | 827 __ LoadFieldFromOffset(R7, R4, ArgumentsDescriptor::count_offset()); |
829 __ LoadFieldFromOffset( | 828 __ LoadFieldFromOffset( |
830 R8, R4, ArgumentsDescriptor::positional_count_offset(), PP); | 829 R8, R4, ArgumentsDescriptor::positional_count_offset()); |
831 __ SmiUntag(R8); | 830 __ SmiUntag(R8); |
832 // Let R7 point to the first passed argument, i.e. to | 831 // Let R7 point to the first passed argument, i.e. to |
833 // fp[kParamEndSlotFromFp + num_args - 0]; num_args (R7) is Smi. | 832 // fp[kParamEndSlotFromFp + num_args - 0]; num_args (R7) is Smi. |
834 __ add(R7, FP, Operand(R7, LSL, 2)); | 833 __ add(R7, FP, Operand(R7, LSL, 2)); |
835 __ AddImmediate(R7, R7, kParamEndSlotFromFp * kWordSize, PP); | 834 __ AddImmediate(R7, R7, kParamEndSlotFromFp * kWordSize); |
836 // Let R6 point to the entry of the first named argument. | 835 // Let R6 point to the entry of the first named argument. |
837 __ add(R6, R4, Operand( | 836 __ add(R6, R4, Operand( |
838 ArgumentsDescriptor::first_named_entry_offset() - kHeapObjectTag)); | 837 ArgumentsDescriptor::first_named_entry_offset() - kHeapObjectTag)); |
839 for (int i = 0; i < num_opt_named_params; i++) { | 838 for (int i = 0; i < num_opt_named_params; i++) { |
840 Label load_default_value, assign_optional_parameter; | 839 Label load_default_value, assign_optional_parameter; |
841 const int param_pos = opt_param_position[i]; | 840 const int param_pos = opt_param_position[i]; |
842 // Check if this named parameter was passed in. | 841 // Check if this named parameter was passed in. |
843 // Load R5 with the name of the argument. | 842 // Load R5 with the name of the argument. |
844 __ LoadFromOffset(R5, R6, ArgumentsDescriptor::name_offset(), PP); | 843 __ LoadFromOffset(R5, R6, ArgumentsDescriptor::name_offset()); |
845 ASSERT(opt_param[i]->name().IsSymbol()); | 844 ASSERT(opt_param[i]->name().IsSymbol()); |
846 __ CompareObject(R5, opt_param[i]->name(), PP); | 845 __ CompareObject(R5, opt_param[i]->name()); |
847 __ b(&load_default_value, NE); | 846 __ b(&load_default_value, NE); |
848 // Load R5 with passed-in argument at provided arg_pos, i.e. at | 847 // Load R5 with passed-in argument at provided arg_pos, i.e. at |
849 // fp[kParamEndSlotFromFp + num_args - arg_pos]. | 848 // fp[kParamEndSlotFromFp + num_args - arg_pos]. |
850 __ LoadFromOffset(R5, R6, ArgumentsDescriptor::position_offset(), PP); | 849 __ LoadFromOffset(R5, R6, ArgumentsDescriptor::position_offset()); |
851 // R5 is arg_pos as Smi. | 850 // R5 is arg_pos as Smi. |
852 // Point to next named entry. | 851 // Point to next named entry. |
853 __ add(R6, R6, Operand(ArgumentsDescriptor::named_entry_size())); | 852 __ add(R6, R6, Operand(ArgumentsDescriptor::named_entry_size())); |
854 // Negate and untag R5 so we can use in scaled address mode. | 853 // Negate and untag R5 so we can use in scaled address mode. |
855 __ subs(R5, ZR, Operand(R5, ASR, 1)); | 854 __ subs(R5, ZR, Operand(R5, ASR, 1)); |
856 Address argument_addr(R7, R5, UXTX, Address::Scaled); // R5 is untagged. | 855 Address argument_addr(R7, R5, UXTX, Address::Scaled); // R5 is untagged. |
857 __ ldr(R5, argument_addr); | 856 __ ldr(R5, argument_addr); |
858 __ b(&assign_optional_parameter); | 857 __ b(&assign_optional_parameter); |
859 __ Bind(&load_default_value); | 858 __ Bind(&load_default_value); |
860 // Load R5 with default argument. | 859 // Load R5 with default argument. |
861 const Object& value = Object::ZoneHandle( | 860 const Object& value = Object::ZoneHandle( |
862 parsed_function().default_parameter_values().At( | 861 parsed_function().default_parameter_values().At( |
863 param_pos - num_fixed_params)); | 862 param_pos - num_fixed_params)); |
864 __ LoadObject(R5, value, PP); | 863 __ LoadObject(R5, value); |
865 __ Bind(&assign_optional_parameter); | 864 __ Bind(&assign_optional_parameter); |
866 // Assign R5 to fp[kFirstLocalSlotFromFp - param_pos]. | 865 // Assign R5 to fp[kFirstLocalSlotFromFp - param_pos]. |
867 // We do not use the final allocation index of the variable here, i.e. | 866 // We do not use the final allocation index of the variable here, i.e. |
868 // scope->VariableAt(i)->index(), because captured variables still need | 867 // scope->VariableAt(i)->index(), because captured variables still need |
869 // to be copied to the context that is not yet allocated. | 868 // to be copied to the context that is not yet allocated. |
870 const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos; | 869 const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos; |
871 __ StoreToOffset(R5, FP, computed_param_pos * kWordSize, PP); | 870 __ StoreToOffset(R5, FP, computed_param_pos * kWordSize); |
872 } | 871 } |
873 delete[] opt_param; | 872 delete[] opt_param; |
874 delete[] opt_param_position; | 873 delete[] opt_param_position; |
875 if (check_correct_named_args) { | 874 if (check_correct_named_args) { |
876 // Check that R6 now points to the null terminator in the arguments | 875 // Check that R6 now points to the null terminator in the arguments |
877 // descriptor. | 876 // descriptor. |
878 __ ldr(R5, Address(R6)); | 877 __ ldr(R5, Address(R6)); |
879 __ CompareObject(R5, Object::null_object(), PP); | 878 __ CompareObject(R5, Object::null_object()); |
880 __ b(&all_arguments_processed, EQ); | 879 __ b(&all_arguments_processed, EQ); |
881 } | 880 } |
882 } else { | 881 } else { |
883 ASSERT(num_opt_pos_params > 0); | 882 ASSERT(num_opt_pos_params > 0); |
884 __ LoadFieldFromOffset( | 883 __ LoadFieldFromOffset( |
885 R8, R4, ArgumentsDescriptor::positional_count_offset(), PP); | 884 R8, R4, ArgumentsDescriptor::positional_count_offset()); |
886 __ SmiUntag(R8); | 885 __ SmiUntag(R8); |
887 for (int i = 0; i < num_opt_pos_params; i++) { | 886 for (int i = 0; i < num_opt_pos_params; i++) { |
888 Label next_parameter; | 887 Label next_parameter; |
889 // Handle this optional positional parameter only if k or fewer positional | 888 // Handle this optional positional parameter only if k or fewer positional |
890 // arguments have been passed, where k is param_pos, the position of this | 889 // arguments have been passed, where k is param_pos, the position of this |
891 // optional parameter in the formal parameter list. | 890 // optional parameter in the formal parameter list. |
892 const int param_pos = num_fixed_params + i; | 891 const int param_pos = num_fixed_params + i; |
893 __ CompareImmediate(R8, param_pos, PP); | 892 __ CompareImmediate(R8, param_pos); |
894 __ b(&next_parameter, GT); | 893 __ b(&next_parameter, GT); |
895 // Load R5 with default argument. | 894 // Load R5 with default argument. |
896 const Object& value = Object::ZoneHandle( | 895 const Object& value = Object::ZoneHandle( |
897 parsed_function().default_parameter_values().At(i)); | 896 parsed_function().default_parameter_values().At(i)); |
898 __ LoadObject(R5, value, PP); | 897 __ LoadObject(R5, value); |
899 // Assign R5 to fp[kFirstLocalSlotFromFp - param_pos]. | 898 // Assign R5 to fp[kFirstLocalSlotFromFp - param_pos]. |
900 // We do not use the final allocation index of the variable here, i.e. | 899 // We do not use the final allocation index of the variable here, i.e. |
901 // scope->VariableAt(i)->index(), because captured variables still need | 900 // scope->VariableAt(i)->index(), because captured variables still need |
902 // to be copied to the context that is not yet allocated. | 901 // to be copied to the context that is not yet allocated. |
903 const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos; | 902 const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos; |
904 __ StoreToOffset(R5, FP, computed_param_pos * kWordSize, PP); | 903 __ StoreToOffset(R5, FP, computed_param_pos * kWordSize); |
905 __ Bind(&next_parameter); | 904 __ Bind(&next_parameter); |
906 } | 905 } |
907 if (check_correct_named_args) { | 906 if (check_correct_named_args) { |
908 __ LoadFieldFromOffset(R7, R4, ArgumentsDescriptor::count_offset(), PP); | 907 __ LoadFieldFromOffset(R7, R4, ArgumentsDescriptor::count_offset()); |
909 __ SmiUntag(R7); | 908 __ SmiUntag(R7); |
910 // Check that R8 equals R7, i.e. no named arguments passed. | 909 // Check that R8 equals R7, i.e. no named arguments passed. |
911 __ CompareRegisters(R8, R7); | 910 __ CompareRegisters(R8, R7); |
912 __ b(&all_arguments_processed, EQ); | 911 __ b(&all_arguments_processed, EQ); |
913 } | 912 } |
914 } | 913 } |
915 | 914 |
916 __ Bind(&wrong_num_arguments); | 915 __ Bind(&wrong_num_arguments); |
917 if (function.IsClosureFunction()) { | 916 if (function.IsClosureFunction()) { |
| 917 ASSERT(assembler()->constant_pool_allowed()); |
918 __ LeaveDartFrame(); // The arguments are still on the stack. | 918 __ LeaveDartFrame(); // The arguments are still on the stack. |
| 919 // Do not use caller's pool ptr in branch. |
| 920 ASSERT(!assembler()->constant_pool_allowed()); |
919 __ BranchPatchable(&StubCode::CallClosureNoSuchMethodLabel()); | 921 __ BranchPatchable(&StubCode::CallClosureNoSuchMethodLabel()); |
| 922 __ set_constant_pool_allowed(true); |
920 // The noSuchMethod call may return to the caller, but not here. | 923 // The noSuchMethod call may return to the caller, but not here. |
921 } else if (check_correct_named_args) { | 924 } else if (check_correct_named_args) { |
922 __ Stop("Wrong arguments"); | 925 __ Stop("Wrong arguments"); |
923 } | 926 } |
924 | 927 |
925 __ Bind(&all_arguments_processed); | 928 __ Bind(&all_arguments_processed); |
926 // Nullify originally passed arguments only after they have been copied and | 929 // Nullify originally passed arguments only after they have been copied and |
927 // checked, otherwise noSuchMethod would not see their original values. | 930 // checked, otherwise noSuchMethod would not see their original values. |
928 // This step can be skipped in case we decide that formal parameters are | 931 // This step can be skipped in case we decide that formal parameters are |
929 // implicitly final, since garbage collecting the unmodified value is not | 932 // implicitly final, since garbage collecting the unmodified value is not |
930 // an issue anymore. | 933 // an issue anymore. |
931 | 934 |
932 // R4 : arguments descriptor array. | 935 // R4 : arguments descriptor array. |
933 __ LoadFieldFromOffset(R8, R4, ArgumentsDescriptor::count_offset(), PP); | 936 __ LoadFieldFromOffset(R8, R4, ArgumentsDescriptor::count_offset()); |
934 __ SmiUntag(R8); | 937 __ SmiUntag(R8); |
935 __ add(R7, FP, Operand((kParamEndSlotFromFp + 1) * kWordSize)); | 938 __ add(R7, FP, Operand((kParamEndSlotFromFp + 1) * kWordSize)); |
936 const Address original_argument_addr(R7, R8, UXTX, Address::Scaled); | 939 const Address original_argument_addr(R7, R8, UXTX, Address::Scaled); |
937 __ LoadObject(TMP, Object::null_object(), PP); | 940 __ LoadObject(TMP, Object::null_object()); |
938 Label null_args_loop, null_args_loop_condition; | 941 Label null_args_loop, null_args_loop_condition; |
939 __ b(&null_args_loop_condition); | 942 __ b(&null_args_loop_condition); |
940 __ Bind(&null_args_loop); | 943 __ Bind(&null_args_loop); |
941 __ str(TMP, original_argument_addr); | 944 __ str(TMP, original_argument_addr); |
942 __ Bind(&null_args_loop_condition); | 945 __ Bind(&null_args_loop_condition); |
943 __ subs(R8, R8, Operand(1)); | 946 __ subs(R8, R8, Operand(1)); |
944 __ b(&null_args_loop, PL); | 947 __ b(&null_args_loop, PL); |
945 } | 948 } |
946 | 949 |
947 | 950 |
948 void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) { | 951 void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) { |
949 // LR: return address. | 952 // LR: return address. |
950 // SP: receiver. | 953 // SP: receiver. |
951 // Sequence node has one return node, its input is load field node. | 954 // Sequence node has one return node, its input is load field node. |
952 __ Comment("Inlined Getter"); | 955 __ Comment("Inlined Getter"); |
953 __ LoadFromOffset(R0, SP, 0 * kWordSize, PP); | 956 __ LoadFromOffset(R0, SP, 0 * kWordSize); |
954 __ LoadFromOffset(R0, R0, offset - kHeapObjectTag, PP); | 957 __ LoadFromOffset(R0, R0, offset - kHeapObjectTag); |
955 __ ret(); | 958 __ ret(); |
956 } | 959 } |
957 | 960 |
958 | 961 |
959 void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) { | 962 void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) { |
960 // LR: return address. | 963 // LR: return address. |
961 // SP+1: receiver. | 964 // SP+1: receiver. |
962 // SP+0: value. | 965 // SP+0: value. |
963 // Sequence node has one store node and one return NULL node. | 966 // Sequence node has one store node and one return NULL node. |
964 __ Comment("Inlined Setter"); | 967 __ Comment("Inlined Setter"); |
965 __ LoadFromOffset(R0, SP, 1 * kWordSize, PP); // Receiver. | 968 __ LoadFromOffset(R0, SP, 1 * kWordSize); // Receiver. |
966 __ LoadFromOffset(R1, SP, 0 * kWordSize, PP); // Value. | 969 __ LoadFromOffset(R1, SP, 0 * kWordSize); // Value. |
967 __ StoreIntoObjectOffset(R0, offset, R1, PP); | 970 __ StoreIntoObjectOffset(R0, offset, R1); |
968 __ LoadObject(R0, Object::null_object(), PP); | 971 __ LoadObject(R0, Object::null_object()); |
969 __ ret(); | 972 __ ret(); |
970 } | 973 } |
971 | 974 |
972 | 975 |
973 void FlowGraphCompiler::EmitFrameEntry() { | 976 void FlowGraphCompiler::EmitFrameEntry() { |
974 const Function& function = parsed_function().function(); | 977 const Function& function = parsed_function().function(); |
975 Register new_pp = kNoPP; | 978 Register new_pp = kNoRegister; |
976 if (CanOptimizeFunction() && | 979 if (CanOptimizeFunction() && |
977 function.IsOptimizable() && | 980 function.IsOptimizable() && |
978 (!is_optimizing() || may_reoptimize())) { | 981 (!is_optimizing() || may_reoptimize())) { |
979 const Register function_reg = R6; | 982 const Register function_reg = R6; |
| 983 const Register saved_pp = R7; |
980 new_pp = R13; | 984 new_pp = R13; |
| 985 // The pool pointer is not setup before entering the Dart frame. |
| 986 // Preserve PP of caller. |
| 987 __ mov(saved_pp, PP); |
981 | 988 |
982 // Set up pool pointer in new_pp. | 989 // Temporarily setup pool pointer for this dart function. |
983 __ LoadPoolPointer(new_pp); | 990 __ LoadPoolPointer(); |
984 | 991 |
985 // Load function object using the callee's pool pointer. | 992 // Load function object using the callee's pool pointer. |
986 __ LoadObject(function_reg, function, new_pp); | 993 __ LoadObject(function_reg, function); |
| 994 // Preserve new PP and restore PP of caller. |
| 995 __ mov(new_pp, PP); |
| 996 __ mov(PP, saved_pp); |
| 997 __ set_constant_pool_allowed(false); |
987 | 998 |
988 // Patch point is after the eventually inlined function object. | 999 // Patch point is after the eventually inlined function object. |
989 entry_patch_pc_offset_ = assembler()->CodeSize(); | 1000 entry_patch_pc_offset_ = assembler()->CodeSize(); |
990 | 1001 |
991 __ LoadFieldFromOffset( | 1002 __ LoadFieldFromOffset( |
992 R7, function_reg, Function::usage_counter_offset(), new_pp, kWord); | 1003 R7, function_reg, Function::usage_counter_offset(), kWord); |
993 // Reoptimization of an optimized function is triggered by counting in | 1004 // Reoptimization of an optimized function is triggered by counting in |
994 // IC stubs, but not at the entry of the function. | 1005 // IC stubs, but not at the entry of the function. |
995 if (!is_optimizing()) { | 1006 if (!is_optimizing()) { |
996 __ add(R7, R7, Operand(1)); | 1007 __ add(R7, R7, Operand(1)); |
997 __ StoreFieldToOffset( | 1008 __ StoreFieldToOffset( |
998 R7, function_reg, Function::usage_counter_offset(), new_pp, kWord); | 1009 R7, function_reg, Function::usage_counter_offset(), kWord); |
999 } | 1010 } |
1000 __ CompareImmediate(R7, GetOptimizationThreshold(), new_pp); | 1011 __ CompareImmediate(R7, GetOptimizationThreshold()); |
1001 ASSERT(function_reg == R6); | 1012 ASSERT(function_reg == R6); |
1002 Label dont_optimize; | 1013 Label dont_optimize; |
1003 __ b(&dont_optimize, LT); | 1014 __ b(&dont_optimize, LT); |
1004 __ Branch(&StubCode::OptimizeFunctionLabel(), new_pp); | 1015 __ Branch(&StubCode::OptimizeFunctionLabel()); |
1005 __ Bind(&dont_optimize); | 1016 __ Bind(&dont_optimize); |
1006 } else if (!flow_graph().IsCompiledForOsr()) { | 1017 } else if (!flow_graph().IsCompiledForOsr()) { |
1007 // We have to load the PP here too because a load of an external label | |
1008 // may be patched at the AddCurrentDescriptor below. | |
1009 new_pp = R13; | |
1010 | |
1011 // Set up pool pointer in new_pp. | |
1012 __ LoadPoolPointer(new_pp); | |
1013 | |
1014 entry_patch_pc_offset_ = assembler()->CodeSize(); | 1018 entry_patch_pc_offset_ = assembler()->CodeSize(); |
1015 } | 1019 } |
1016 __ Comment("Enter frame"); | 1020 __ Comment("Enter frame"); |
1017 if (flow_graph().IsCompiledForOsr()) { | 1021 if (flow_graph().IsCompiledForOsr()) { |
1018 intptr_t extra_slots = StackSize() | 1022 intptr_t extra_slots = StackSize() |
1019 - flow_graph().num_stack_locals() | 1023 - flow_graph().num_stack_locals() |
1020 - flow_graph().num_copied_params(); | 1024 - flow_graph().num_copied_params(); |
1021 ASSERT(extra_slots >= 0); | 1025 ASSERT(extra_slots >= 0); |
1022 __ EnterOsrFrame(extra_slots * kWordSize, new_pp); | 1026 __ EnterOsrFrame(extra_slots * kWordSize, new_pp); |
1023 } else { | 1027 } else { |
1024 ASSERT(StackSize() >= 0); | 1028 ASSERT(StackSize() >= 0); |
1025 __ EnterDartFrameWithInfo(StackSize() * kWordSize, new_pp); | 1029 __ EnterDartFrameWithInfo(StackSize() * kWordSize, new_pp); |
1026 } | 1030 } |
1027 } | 1031 } |
1028 | 1032 |
1029 | 1033 |
1030 // Input parameters: | 1034 // Input parameters: |
1031 // LR: return address. | 1035 // LR: return address. |
1032 // SP: address of last argument. | 1036 // SP: address of last argument. |
1033 // FP: caller's frame pointer. | 1037 // FP: caller's frame pointer. |
1034 // PP: caller's pool pointer. | 1038 // PP: caller's pool pointer. |
1035 // R5: ic-data. | 1039 // R5: ic-data. |
1036 // R4: arguments descriptor array. | 1040 // R4: arguments descriptor array. |
1037 void FlowGraphCompiler::CompileGraph() { | 1041 void FlowGraphCompiler::CompileGraph() { |
1038 InitCompiler(); | 1042 InitCompiler(); |
1039 | 1043 |
1040 TryIntrinsify(); | 1044 TryIntrinsify(); |
1041 | 1045 |
1042 EmitFrameEntry(); | 1046 EmitFrameEntry(); |
| 1047 ASSERT(assembler()->constant_pool_allowed()); |
1043 | 1048 |
1044 const Function& function = parsed_function().function(); | 1049 const Function& function = parsed_function().function(); |
1045 | 1050 |
1046 const int num_fixed_params = function.num_fixed_parameters(); | 1051 const int num_fixed_params = function.num_fixed_parameters(); |
1047 const int num_copied_params = parsed_function().num_copied_params(); | 1052 const int num_copied_params = parsed_function().num_copied_params(); |
1048 const int num_locals = parsed_function().num_stack_locals(); | 1053 const int num_locals = parsed_function().num_stack_locals(); |
1049 | 1054 |
1050 // We check the number of passed arguments when we have to copy them due to | 1055 // We check the number of passed arguments when we have to copy them due to |
1051 // the presence of optional parameters. | 1056 // the presence of optional parameters. |
1052 // No such checking code is generated if only fixed parameters are declared, | 1057 // No such checking code is generated if only fixed parameters are declared, |
1053 // unless we are in debug mode or unless we are compiling a closure. | 1058 // unless we are in debug mode or unless we are compiling a closure. |
1054 if (num_copied_params == 0) { | 1059 if (num_copied_params == 0) { |
1055 #ifdef DEBUG | 1060 #ifdef DEBUG |
1056 ASSERT(!parsed_function().function().HasOptionalParameters()); | 1061 ASSERT(!parsed_function().function().HasOptionalParameters()); |
1057 const bool check_arguments = !flow_graph().IsCompiledForOsr(); | 1062 const bool check_arguments = !flow_graph().IsCompiledForOsr(); |
1058 #else | 1063 #else |
1059 const bool check_arguments = | 1064 const bool check_arguments = |
1060 function.IsClosureFunction() && !flow_graph().IsCompiledForOsr(); | 1065 function.IsClosureFunction() && !flow_graph().IsCompiledForOsr(); |
1061 #endif | 1066 #endif |
1062 if (check_arguments) { | 1067 if (check_arguments) { |
1063 __ Comment("Check argument count"); | 1068 __ Comment("Check argument count"); |
1064 // Check that exactly num_fixed arguments are passed in. | 1069 // Check that exactly num_fixed arguments are passed in. |
1065 Label correct_num_arguments, wrong_num_arguments; | 1070 Label correct_num_arguments, wrong_num_arguments; |
1066 __ LoadFieldFromOffset(R0, R4, ArgumentsDescriptor::count_offset(), PP); | 1071 __ LoadFieldFromOffset(R0, R4, ArgumentsDescriptor::count_offset()); |
1067 __ CompareImmediate(R0, Smi::RawValue(num_fixed_params), PP); | 1072 __ CompareImmediate(R0, Smi::RawValue(num_fixed_params)); |
1068 __ b(&wrong_num_arguments, NE); | 1073 __ b(&wrong_num_arguments, NE); |
1069 __ LoadFieldFromOffset(R1, R4, | 1074 __ LoadFieldFromOffset(R1, R4, |
1070 ArgumentsDescriptor::positional_count_offset(), PP); | 1075 ArgumentsDescriptor::positional_count_offset()); |
1071 __ CompareRegisters(R0, R1); | 1076 __ CompareRegisters(R0, R1); |
1072 __ b(&correct_num_arguments, EQ); | 1077 __ b(&correct_num_arguments, EQ); |
1073 __ Bind(&wrong_num_arguments); | 1078 __ Bind(&wrong_num_arguments); |
1074 if (function.IsClosureFunction()) { | 1079 if (function.IsClosureFunction()) { |
| 1080 ASSERT(assembler()->constant_pool_allowed()); |
1075 __ LeaveDartFrame(); // The arguments are still on the stack. | 1081 __ LeaveDartFrame(); // The arguments are still on the stack. |
| 1082 // Do not use caller's pool ptr in branch. |
| 1083 ASSERT(!assembler()->constant_pool_allowed()); |
1076 __ BranchPatchable(&StubCode::CallClosureNoSuchMethodLabel()); | 1084 __ BranchPatchable(&StubCode::CallClosureNoSuchMethodLabel()); |
| 1085 __ set_constant_pool_allowed(true); |
1077 // The noSuchMethod call may return to the caller, but not here. | 1086 // The noSuchMethod call may return to the caller, but not here. |
1078 } else { | 1087 } else { |
1079 __ Stop("Wrong number of arguments"); | 1088 __ Stop("Wrong number of arguments"); |
1080 } | 1089 } |
1081 __ Bind(&correct_num_arguments); | 1090 __ Bind(&correct_num_arguments); |
1082 } | 1091 } |
1083 } else if (!flow_graph().IsCompiledForOsr()) { | 1092 } else if (!flow_graph().IsCompiledForOsr()) { |
1084 CopyParameters(); | 1093 CopyParameters(); |
1085 } | 1094 } |
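The argument-count check emitted above reduces to a simple predicate over the two Smi-tagged fields loaded from the arguments descriptor in R4 (treating the tagged values abstractly). A rough C++ sketch; ArgumentCountMatches is an illustrative helper, not VM code:

static bool ArgumentCountMatches(intptr_t count,
                                 intptr_t positional_count,
                                 intptr_t num_fixed_params) {
  // With no optional parameters every argument must be positional and the
  // total must equal the declared fixed-parameter count; otherwise control
  // falls into the wrong_num_arguments path above.
  return (count == num_fixed_params) && (count == positional_count);
}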
1086 | 1095 |
1087 if (function.IsClosureFunction() && !flow_graph().IsCompiledForOsr()) { | 1096 if (function.IsClosureFunction() && !flow_graph().IsCompiledForOsr()) { |
1088 // Load context from the closure object (first argument). | 1097 // Load context from the closure object (first argument). |
1089 LocalScope* scope = parsed_function().node_sequence()->scope(); | 1098 LocalScope* scope = parsed_function().node_sequence()->scope(); |
1090 LocalVariable* closure_parameter = scope->VariableAt(0); | 1099 LocalVariable* closure_parameter = scope->VariableAt(0); |
1091 __ ldr(CTX, Address(FP, closure_parameter->index() * kWordSize)); | 1100 __ ldr(CTX, Address(FP, closure_parameter->index() * kWordSize)); |
1092 __ ldr(CTX, FieldAddress(CTX, Closure::context_offset())); | 1101 __ ldr(CTX, FieldAddress(CTX, Closure::context_offset())); |
1093 } | 1102 } |
1094 | 1103 |
1095 // In unoptimized code, initialize (non-argument) stack allocated slots to | 1104 // In unoptimized code, initialize (non-argument) stack allocated slots to |
1096 // null. | 1105 // null. |
1097 if (!is_optimizing()) { | 1106 if (!is_optimizing()) { |
1098 ASSERT(num_locals > 0); // There is always at least context_var. | 1107 ASSERT(num_locals > 0); // There is always at least context_var. |
1099 __ Comment("Initialize spill slots"); | 1108 __ Comment("Initialize spill slots"); |
1100 const intptr_t slot_base = parsed_function().first_stack_local_index(); | 1109 const intptr_t slot_base = parsed_function().first_stack_local_index(); |
1101 const intptr_t context_index = | 1110 const intptr_t context_index = |
1102 parsed_function().current_context_var()->index(); | 1111 parsed_function().current_context_var()->index(); |
1103 if (num_locals > 1) { | 1112 if (num_locals > 1) { |
1104 __ LoadObject(R0, Object::null_object(), PP); | 1113 __ LoadObject(R0, Object::null_object()); |
1105 } | 1114 } |
1106 for (intptr_t i = 0; i < num_locals; ++i) { | 1115 for (intptr_t i = 0; i < num_locals; ++i) { |
1107 // Subtract index i (locals lie at lower addresses than FP). | 1116 // Subtract index i (locals lie at lower addresses than FP). |
1108 if (((slot_base - i) == context_index)) { | 1117 if (((slot_base - i) == context_index)) { |
1109 if (function.IsClosureFunction()) { | 1118 if (function.IsClosureFunction()) { |
1110 __ StoreToOffset(CTX, FP, (slot_base - i) * kWordSize, PP); | 1119 __ StoreToOffset(CTX, FP, (slot_base - i) * kWordSize); |
1111 } else { | 1120 } else { |
1112 const Context& empty_context = Context::ZoneHandle( | 1121 const Context& empty_context = Context::ZoneHandle( |
1113 zone(), isolate()->object_store()->empty_context()); | 1122 zone(), isolate()->object_store()->empty_context()); |
1114 __ LoadObject(R1, empty_context, PP); | 1123 __ LoadObject(R1, empty_context); |
1115 __ StoreToOffset(R1, FP, (slot_base - i) * kWordSize, PP); | 1124 __ StoreToOffset(R1, FP, (slot_base - i) * kWordSize); |
1116 } | 1125 } |
1117 } else { | 1126 } else { |
1118 ASSERT(num_locals > 1); | 1127 ASSERT(num_locals > 1); |
1119 __ StoreToOffset(R0, FP, (slot_base - i) * kWordSize, PP); | 1128 __ StoreToOffset(R0, FP, (slot_base - i) * kWordSize); |
1120 } | 1129 } |
1121 } | 1130 } |
1122 } | 1131 } |
1123 | 1132 |
1124 VisitBlocks(); | 1133 VisitBlocks(); |
1125 | 1134 |
1126 __ brk(0); | 1135 __ brk(0); |
| 1136 ASSERT(assembler()->constant_pool_allowed()); |
1127 GenerateDeferredCode(); | 1137 GenerateDeferredCode(); |
1128 | 1138 |
1129 // Emit function patching code. This will be swapped with the first 3 | 1139 // Emit function patching code. This will be swapped with the first 3 |
1130 // instructions at entry point. | 1140 // instructions at entry point. |
1131 patch_code_pc_offset_ = assembler()->CodeSize(); | 1141 patch_code_pc_offset_ = assembler()->CodeSize(); |
1132 __ BranchPatchable(&StubCode::FixCallersTargetLabel()); | 1142 __ BranchPatchable(&StubCode::FixCallersTargetLabel()); |
1133 | 1143 |
1134 if (is_optimizing()) { | 1144 if (is_optimizing()) { |
1135 lazy_deopt_pc_offset_ = assembler()->CodeSize(); | 1145 lazy_deopt_pc_offset_ = assembler()->CodeSize(); |
1136 __ BranchPatchable(&StubCode::DeoptimizeLazyLabel()); | 1146 __ BranchPatchable(&StubCode::DeoptimizeLazyLabel()); |
(...skipping 54 matching lines...)
1191 } | 1201 } |
1192 } | 1202 } |
1193 | 1203 |
1194 | 1204 |
1195 void FlowGraphCompiler::EmitEdgeCounter() { | 1205 void FlowGraphCompiler::EmitEdgeCounter() { |
1196 // We do not check for overflow when incrementing the edge counter. The | 1206 // We do not check for overflow when incrementing the edge counter. The |
1197 // function should normally be optimized long before the counter can | 1207 // function should normally be optimized long before the counter can |
1198 // overflow; and though we do not reset the counters when we optimize or | 1208 // overflow; and though we do not reset the counters when we optimize or |
1199 // deoptimize, there is a bound on the number of | 1209 // deoptimize, there is a bound on the number of |
1200 // optimization/deoptimization cycles we will attempt. | 1210 // optimization/deoptimization cycles we will attempt. |
| 1211 ASSERT(assembler_->constant_pool_allowed()); |
1201 const Array& counter = Array::ZoneHandle(Array::New(1, Heap::kOld)); | 1212 const Array& counter = Array::ZoneHandle(Array::New(1, Heap::kOld)); |
1202 counter.SetAt(0, Smi::Handle(Smi::New(0))); | 1213 counter.SetAt(0, Smi::Handle(Smi::New(0))); |
1203 __ Comment("Edge counter"); | 1214 __ Comment("Edge counter"); |
1204 __ LoadUniqueObject(R0, counter, PP); | 1215 __ LoadUniqueObject(R0, counter); |
1205 __ LoadFieldFromOffset(TMP, R0, Array::element_offset(0), PP); | 1216 __ LoadFieldFromOffset(TMP, R0, Array::element_offset(0)); |
1206 __ add(TMP, TMP, Operand(Smi::RawValue(1))); | 1217 __ add(TMP, TMP, Operand(Smi::RawValue(1))); |
1207 __ StoreFieldToOffset(TMP, R0, Array::element_offset(0), PP); | 1218 __ StoreFieldToOffset(TMP, R0, Array::element_offset(0)); |
1208 } | 1219 } |
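Roughly what the load/add/store sequence above does to the counter cell, assuming the conventional one-bit Smi tag. This is an illustrative sketch, not VM code; the SmiTag helper is a local assumption standing in for Smi::RawValue:

#include <cstdint>

static inline int64_t SmiTag(int64_t value) { return value << 1; }

static void IncrementEdgeCounter(int64_t* counter_cell) {
  int64_t raw = *counter_cell;  // LoadFieldFromOffset(TMP, R0, element_offset(0))
  raw += SmiTag(1);             // add TMP, TMP, Operand(Smi::RawValue(1))
  *counter_cell = raw;          // StoreFieldToOffset; overflow deliberately unchecked
}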
1209 | 1220 |
1210 | 1221 |
1211 void FlowGraphCompiler::EmitOptimizedInstanceCall( | 1222 void FlowGraphCompiler::EmitOptimizedInstanceCall( |
1212 ExternalLabel* target_label, | 1223 ExternalLabel* target_label, |
1213 const ICData& ic_data, | 1224 const ICData& ic_data, |
1214 intptr_t argument_count, | 1225 intptr_t argument_count, |
1215 intptr_t deopt_id, | 1226 intptr_t deopt_id, |
1216 intptr_t token_pos, | 1227 intptr_t token_pos, |
1217 LocationSummary* locs) { | 1228 LocationSummary* locs) { |
1218 ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0); | 1229 ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0); |
1219 // Each ICData propagated from unoptimized to optimized code contains the | 1230 // Each ICData propagated from unoptimized to optimized code contains the |
1220 // function that corresponds to the Dart function of that IC call. Due | 1231 // function that corresponds to the Dart function of that IC call. Due |
1221 // to inlining in optimized code, that function may not correspond to the | 1232 // to inlining in optimized code, that function may not correspond to the |
1222 // top-level function (parsed_function().function()) which could be | 1233 // top-level function (parsed_function().function()) which could be |
1223 // reoptimized and which counter needs to be incremented. | 1234 // reoptimized and which counter needs to be incremented. |
1224 // Pass the function explicitly, it is used in IC stub. | 1235 // Pass the function explicitly, it is used in IC stub. |
1225 | 1236 |
1226 __ LoadObject(R6, parsed_function().function(), PP); | 1237 __ LoadObject(R6, parsed_function().function()); |
1227 __ LoadUniqueObject(R5, ic_data, PP); | 1238 __ LoadUniqueObject(R5, ic_data); |
1228 GenerateDartCall(deopt_id, | 1239 GenerateDartCall(deopt_id, |
1229 token_pos, | 1240 token_pos, |
1230 target_label, | 1241 target_label, |
1231 RawPcDescriptors::kIcCall, | 1242 RawPcDescriptors::kIcCall, |
1232 locs); | 1243 locs); |
1233 __ Drop(argument_count); | 1244 __ Drop(argument_count); |
1234 } | 1245 } |
1235 | 1246 |
1236 | 1247 |
1237 void FlowGraphCompiler::EmitInstanceCall(ExternalLabel* target_label, | 1248 void FlowGraphCompiler::EmitInstanceCall(ExternalLabel* target_label, |
1238 const ICData& ic_data, | 1249 const ICData& ic_data, |
1239 intptr_t argument_count, | 1250 intptr_t argument_count, |
1240 intptr_t deopt_id, | 1251 intptr_t deopt_id, |
1241 intptr_t token_pos, | 1252 intptr_t token_pos, |
1242 LocationSummary* locs) { | 1253 LocationSummary* locs) { |
1243 ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0); | 1254 ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0); |
1244 __ LoadUniqueObject(R5, ic_data, PP); | 1255 __ LoadUniqueObject(R5, ic_data); |
1245 GenerateDartCall(deopt_id, | 1256 GenerateDartCall(deopt_id, |
1246 token_pos, | 1257 token_pos, |
1247 target_label, | 1258 target_label, |
1248 RawPcDescriptors::kIcCall, | 1259 RawPcDescriptors::kIcCall, |
1249 locs); | 1260 locs); |
1250 __ Drop(argument_count); | 1261 __ Drop(argument_count); |
1251 } | 1262 } |
1252 | 1263 |
1253 | 1264 |
1254 void FlowGraphCompiler::EmitMegamorphicInstanceCall( | 1265 void FlowGraphCompiler::EmitMegamorphicInstanceCall( |
1255 const ICData& ic_data, | 1266 const ICData& ic_data, |
1256 intptr_t argument_count, | 1267 intptr_t argument_count, |
1257 intptr_t deopt_id, | 1268 intptr_t deopt_id, |
1258 intptr_t token_pos, | 1269 intptr_t token_pos, |
1259 LocationSummary* locs) { | 1270 LocationSummary* locs) { |
1260 MegamorphicCacheTable* table = Isolate::Current()->megamorphic_cache_table(); | 1271 MegamorphicCacheTable* table = Isolate::Current()->megamorphic_cache_table(); |
1261 const String& name = String::Handle(ic_data.target_name()); | 1272 const String& name = String::Handle(ic_data.target_name()); |
1262 const Array& arguments_descriptor = | 1273 const Array& arguments_descriptor = |
1263 Array::ZoneHandle(ic_data.arguments_descriptor()); | 1274 Array::ZoneHandle(ic_data.arguments_descriptor()); |
1264 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0)); | 1275 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0)); |
1265 const MegamorphicCache& cache = | 1276 const MegamorphicCache& cache = |
1266 MegamorphicCache::ZoneHandle(table->Lookup(name, arguments_descriptor)); | 1277 MegamorphicCache::ZoneHandle(table->Lookup(name, arguments_descriptor)); |
1267 const Register receiverR = R0; | 1278 const Register receiverR = R0; |
1268 const Register cacheR = R1; | 1279 const Register cacheR = R1; |
1269 const Register targetR = R1; | 1280 const Register targetR = R1; |
1270 __ LoadFromOffset(receiverR, SP, (argument_count - 1) * kWordSize, PP); | 1281 __ LoadFromOffset(receiverR, SP, (argument_count - 1) * kWordSize); |
1271 __ LoadObject(cacheR, cache, PP); | 1282 __ LoadObject(cacheR, cache); |
1272 | 1283 |
1273 if (FLAG_use_megamorphic_stub) { | 1284 if (FLAG_use_megamorphic_stub) { |
1274 __ BranchLink(&StubCode::MegamorphicLookupLabel(), PP); | 1285 __ BranchLink(&StubCode::MegamorphicLookupLabel()); |
1275 } else { | 1286 } else { |
1276 StubCode::EmitMegamorphicLookup(assembler(), receiverR, cacheR, targetR); | 1287 StubCode::EmitMegamorphicLookup(assembler(), receiverR, cacheR, targetR); |
1277 } | 1288 } |
1278 __ LoadObject(R5, ic_data, PP); | 1289 __ LoadObject(R5, ic_data); |
1279 __ LoadObject(R4, arguments_descriptor, PP); | 1290 __ LoadObject(R4, arguments_descriptor); |
1280 __ blr(targetR); | 1291 __ blr(targetR); |
1281 AddCurrentDescriptor(RawPcDescriptors::kOther, | 1292 AddCurrentDescriptor(RawPcDescriptors::kOther, |
1282 Isolate::kNoDeoptId, token_pos); | 1293 Isolate::kNoDeoptId, token_pos); |
1283 RecordSafepoint(locs); | 1294 RecordSafepoint(locs); |
1284 const intptr_t deopt_id_after = Isolate::ToDeoptAfter(deopt_id); | 1295 const intptr_t deopt_id_after = Isolate::ToDeoptAfter(deopt_id); |
1285 if (is_optimizing()) { | 1296 if (is_optimizing()) { |
1286 AddDeoptIndexAtCall(deopt_id_after, token_pos); | 1297 AddDeoptIndexAtCall(deopt_id_after, token_pos); |
1287 } else { | 1298 } else { |
1288 // Add deoptimization continuation point after the call and before the | 1299 // Add deoptimization continuation point after the call and before the |
1289 // arguments are removed. | 1300 // arguments are removed. |
1290 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); | 1301 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); |
1291 } | 1302 } |
1292 __ Drop(argument_count); | 1303 __ Drop(argument_count); |
1293 } | 1304 } |
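For orientation, the lookup performed by the MegamorphicLookup stub (or the inlined EmitMegamorphicLookup) amounts to probing a hash table keyed on the receiver's class id and handing back the target entry point in targetR. The sketch below is a loose approximation; the field names, probing scheme, and miss handling are illustrative assumptions, not the VM's actual cache layout:

struct MegamorphicCacheSketch {
  struct Entry { intptr_t cid; void* target; };
  intptr_t mask;   // table size - 1; table sizes are powers of two
  Entry* entries;

  void* Lookup(intptr_t receiver_cid) const {
    intptr_t probe = receiver_cid & mask;
    while (entries[probe].cid != receiver_cid) {
      // Linear probing; the real stub diverts to a miss handler rather than
      // spinning when the cid is absent from the cache.
      probe = (probe + 1) & mask;
    }
    return entries[probe].target;  // becomes targetR (R1), then blr(targetR)
  }
};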
1294 | 1305 |
1295 | 1306 |
1296 void FlowGraphCompiler::EmitUnoptimizedStaticCall( | 1307 void FlowGraphCompiler::EmitUnoptimizedStaticCall( |
1297 intptr_t argument_count, | 1308 intptr_t argument_count, |
1298 intptr_t deopt_id, | 1309 intptr_t deopt_id, |
1299 intptr_t token_pos, | 1310 intptr_t token_pos, |
1300 LocationSummary* locs, | 1311 LocationSummary* locs, |
1301 const ICData& ic_data) { | 1312 const ICData& ic_data) { |
1302 const uword label_address = | 1313 const uword label_address = |
1303 StubCode::UnoptimizedStaticCallEntryPoint(ic_data.NumArgsTested()); | 1314 StubCode::UnoptimizedStaticCallEntryPoint(ic_data.NumArgsTested()); |
1304 ExternalLabel target_label(label_address); | 1315 ExternalLabel target_label(label_address); |
1305 __ LoadObject(R5, ic_data, PP); | 1316 __ LoadObject(R5, ic_data); |
1306 GenerateDartCall(deopt_id, | 1317 GenerateDartCall(deopt_id, |
1307 token_pos, | 1318 token_pos, |
1308 &target_label, | 1319 &target_label, |
1309 RawPcDescriptors::kUnoptStaticCall, | 1320 RawPcDescriptors::kUnoptStaticCall, |
1310 locs); | 1321 locs); |
1311 __ Drop(argument_count); | 1322 __ Drop(argument_count); |
1312 } | 1323 } |
1313 | 1324 |
1314 | 1325 |
1315 void FlowGraphCompiler::EmitOptimizedStaticCall( | 1326 void FlowGraphCompiler::EmitOptimizedStaticCall( |
1316 const Function& function, | 1327 const Function& function, |
1317 const Array& arguments_descriptor, | 1328 const Array& arguments_descriptor, |
1318 intptr_t argument_count, | 1329 intptr_t argument_count, |
1319 intptr_t deopt_id, | 1330 intptr_t deopt_id, |
1320 intptr_t token_pos, | 1331 intptr_t token_pos, |
1321 LocationSummary* locs) { | 1332 LocationSummary* locs) { |
1322 __ LoadObject(R4, arguments_descriptor, PP); | 1333 __ LoadObject(R4, arguments_descriptor); |
1323 // Do not use the code from the function, but let the code be patched so that | 1334 // Do not use the code from the function, but let the code be patched so that |
1324 // we can record the outgoing edges to other code. | 1335 // we can record the outgoing edges to other code. |
1325 GenerateDartCall(deopt_id, | 1336 GenerateDartCall(deopt_id, |
1326 token_pos, | 1337 token_pos, |
1327 &StubCode::CallStaticFunctionLabel(), | 1338 &StubCode::CallStaticFunctionLabel(), |
1328 RawPcDescriptors::kOther, | 1339 RawPcDescriptors::kOther, |
1329 locs); | 1340 locs); |
1330 AddStaticCallTarget(function); | 1341 AddStaticCallTarget(function); |
1331 __ Drop(argument_count); | 1342 __ Drop(argument_count); |
1332 } | 1343 } |
1333 | 1344 |
1334 | 1345 |
1335 Condition FlowGraphCompiler::EmitEqualityRegConstCompare( | 1346 Condition FlowGraphCompiler::EmitEqualityRegConstCompare( |
1336 Register reg, | 1347 Register reg, |
1337 const Object& obj, | 1348 const Object& obj, |
1338 bool needs_number_check, | 1349 bool needs_number_check, |
1339 intptr_t token_pos) { | 1350 intptr_t token_pos) { |
1340 if (needs_number_check) { | 1351 if (needs_number_check) { |
1341 ASSERT(!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint()); | 1352 ASSERT(!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint()); |
1342 __ Push(reg); | 1353 __ Push(reg); |
1343 __ PushObject(obj, PP); | 1354 __ PushObject(obj); |
1344 if (is_optimizing()) { | 1355 if (is_optimizing()) { |
1345 __ BranchLinkPatchable( | 1356 __ BranchLinkPatchable( |
1346 &StubCode::OptimizedIdenticalWithNumberCheckLabel()); | 1357 &StubCode::OptimizedIdenticalWithNumberCheckLabel()); |
1347 } else { | 1358 } else { |
1348 __ BranchLinkPatchable( | 1359 __ BranchLinkPatchable( |
1349 &StubCode::UnoptimizedIdenticalWithNumberCheckLabel()); | 1360 &StubCode::UnoptimizedIdenticalWithNumberCheckLabel()); |
1350 } | 1361 } |
1351 if (token_pos != Scanner::kNoSourcePos) { | 1362 if (token_pos != Scanner::kNoSourcePos) { |
1352 AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, | 1363 AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, |
1353 Isolate::kNoDeoptId, | 1364 Isolate::kNoDeoptId, |
1354 token_pos); | 1365 token_pos); |
1355 } | 1366 } |
1356 // Stub returns result in flags (result of a cmp, we need Z computed). | 1367 // Stub returns result in flags (result of a cmp, we need Z computed). |
1357 __ Drop(1); // Discard constant. | 1368 __ Drop(1); // Discard constant. |
1358 __ Pop(reg); // Restore 'reg'. | 1369 __ Pop(reg); // Restore 'reg'. |
1359 } else { | 1370 } else { |
1360 __ CompareObject(reg, obj, PP); | 1371 __ CompareObject(reg, obj); |
1361 } | 1372 } |
1362 return EQ; | 1373 return EQ; |
1363 } | 1374 } |
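The number-check path exists because identical() must report two boxed numbers with the same value as identical even when they are distinct heap objects, which a plain pointer compare would miss. For the double case this comes down to a bit comparison, the same notion Utils::DoublesBitEqual captures later in this file. A minimal self-contained sketch of just that case (the helper name is illustrative):

#include <cstdint>
#include <cstring>

// Two distinct Double boxes holding the same bit pattern must compare equal;
// the resulting equality is what the stub leaves in the Z flag.
static bool IdenticalDoubles(double left, double right) {
  uint64_t left_bits, right_bits;
  std::memcpy(&left_bits, &left, sizeof(left_bits));
  std::memcpy(&right_bits, &right, sizeof(right_bits));
  return left_bits == right_bits;
}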
1364 | 1375 |
1365 | 1376 |
1366 Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left, | 1377 Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left, |
1367 Register right, | 1378 Register right, |
1368 bool needs_number_check, | 1379 bool needs_number_check, |
1369 intptr_t token_pos) { | 1380 intptr_t token_pos) { |
1370 if (needs_number_check) { | 1381 if (needs_number_check) { |
(...skipping 102 matching lines...)
1473 intptr_t token_index, | 1484 intptr_t token_index, |
1474 LocationSummary* locs) { | 1485 LocationSummary* locs) { |
1475 ASSERT(is_optimizing()); | 1486 ASSERT(is_optimizing()); |
1476 | 1487 |
1477 __ Comment("EmitTestAndCall"); | 1488 __ Comment("EmitTestAndCall"); |
1478 const Array& arguments_descriptor = | 1489 const Array& arguments_descriptor = |
1479 Array::ZoneHandle(ArgumentsDescriptor::New(argument_count, | 1490 Array::ZoneHandle(ArgumentsDescriptor::New(argument_count, |
1480 argument_names)); | 1491 argument_names)); |
1481 | 1492 |
1482 // Load receiver into R0. | 1493 // Load receiver into R0. |
1483 __ LoadFromOffset(R0, SP, (argument_count - 1) * kWordSize, PP); | 1494 __ LoadFromOffset(R0, SP, (argument_count - 1) * kWordSize); |
1484 __ LoadObject(R4, arguments_descriptor, PP); | 1495 __ LoadObject(R4, arguments_descriptor); |
1485 | 1496 |
1486 const bool kFirstCheckIsSmi = ic_data.GetReceiverClassIdAt(0) == kSmiCid; | 1497 const bool kFirstCheckIsSmi = ic_data.GetReceiverClassIdAt(0) == kSmiCid; |
1487 const intptr_t kNumChecks = ic_data.NumberOfChecks(); | 1498 const intptr_t kNumChecks = ic_data.NumberOfChecks(); |
1488 | 1499 |
1489 ASSERT(!ic_data.IsNull() && (kNumChecks > 0)); | 1500 ASSERT(!ic_data.IsNull() && (kNumChecks > 0)); |
1490 | 1501 |
1491 Label after_smi_test; | 1502 Label after_smi_test; |
1492 __ tsti(R0, Immediate(kSmiTagMask)); | 1503 __ tsti(R0, Immediate(kSmiTagMask)); |
1493 if (kFirstCheckIsSmi) { | 1504 if (kFirstCheckIsSmi) { |
1494 // Jump if receiver is not Smi. | 1505 // Jump if receiver is not Smi. |
(...skipping 25 matching lines...)
1520 ASSERT(!ic_data.IsNull() && (kNumChecks > 0)); | 1531 ASSERT(!ic_data.IsNull() && (kNumChecks > 0)); |
1521 GrowableArray<CidTarget> sorted(kNumChecks); | 1532 GrowableArray<CidTarget> sorted(kNumChecks); |
1522 SortICDataByCount(ic_data, &sorted, /* drop_smi = */ true); | 1533 SortICDataByCount(ic_data, &sorted, /* drop_smi = */ true); |
1523 | 1534 |
1524 // Value is not Smi, | 1535 // Value is not Smi, |
1525 const intptr_t kSortedLen = sorted.length(); | 1536 const intptr_t kSortedLen = sorted.length(); |
1526 // If kSortedLen is 0 then only a Smi check was needed; the Smi check above | 1537 // If kSortedLen is 0 then only a Smi check was needed; the Smi check above |
1527 // will fail if there was only one check and receiver is not Smi. | 1538 // will fail if there was only one check and receiver is not Smi. |
1528 if (kSortedLen == 0) return; | 1539 if (kSortedLen == 0) return; |
1529 | 1540 |
1530 __ LoadClassId(R2, R0, PP); | 1541 __ LoadClassId(R2, R0); |
1531 for (intptr_t i = 0; i < kSortedLen; i++) { | 1542 for (intptr_t i = 0; i < kSortedLen; i++) { |
1532 const bool kIsLastCheck = (i == (kSortedLen - 1)); | 1543 const bool kIsLastCheck = (i == (kSortedLen - 1)); |
1533 ASSERT(sorted[i].cid != kSmiCid); | 1544 ASSERT(sorted[i].cid != kSmiCid); |
1534 Label next_test; | 1545 Label next_test; |
1535 __ CompareImmediate(R2, sorted[i].cid, PP); | 1546 __ CompareImmediate(R2, sorted[i].cid); |
1536 if (kIsLastCheck) { | 1547 if (kIsLastCheck) { |
1537 __ b(failed, NE); | 1548 __ b(failed, NE); |
1538 } else { | 1549 } else { |
1539 __ b(&next_test, NE); | 1550 __ b(&next_test, NE); |
1540 } | 1551 } |
1541 // Do not use the code from the function, but let the code be patched so | 1552 // Do not use the code from the function, but let the code be patched so |
1542 // that we can record the outgoing edges to other code. | 1553 // that we can record the outgoing edges to other code. |
1543 GenerateDartCall(deopt_id, | 1554 GenerateDartCall(deopt_id, |
1544 token_index, | 1555 token_index, |
1545 &StubCode::CallStaticFunctionLabel(), | 1556 &StubCode::CallStaticFunctionLabel(), |
(...skipping 18 matching lines...)
1564 MoveOperands* move = moves_[index]; | 1575 MoveOperands* move = moves_[index]; |
1565 const Location source = move->src(); | 1576 const Location source = move->src(); |
1566 const Location destination = move->dest(); | 1577 const Location destination = move->dest(); |
1567 | 1578 |
1568 if (source.IsRegister()) { | 1579 if (source.IsRegister()) { |
1569 if (destination.IsRegister()) { | 1580 if (destination.IsRegister()) { |
1570 __ mov(destination.reg(), source.reg()); | 1581 __ mov(destination.reg(), source.reg()); |
1571 } else { | 1582 } else { |
1572 ASSERT(destination.IsStackSlot()); | 1583 ASSERT(destination.IsStackSlot()); |
1573 const intptr_t dest_offset = destination.ToStackSlotOffset(); | 1584 const intptr_t dest_offset = destination.ToStackSlotOffset(); |
1574 __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset, PP); | 1585 __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset); |
1575 } | 1586 } |
1576 } else if (source.IsStackSlot()) { | 1587 } else if (source.IsStackSlot()) { |
1577 if (destination.IsRegister()) { | 1588 if (destination.IsRegister()) { |
1578 const intptr_t source_offset = source.ToStackSlotOffset(); | 1589 const intptr_t source_offset = source.ToStackSlotOffset(); |
1579 __ LoadFromOffset( | 1590 __ LoadFromOffset( |
1580 destination.reg(), source.base_reg(), source_offset, PP); | 1591 destination.reg(), source.base_reg(), source_offset); |
1581 } else { | 1592 } else { |
1582 ASSERT(destination.IsStackSlot()); | 1593 ASSERT(destination.IsStackSlot()); |
1583 const intptr_t source_offset = source.ToStackSlotOffset(); | 1594 const intptr_t source_offset = source.ToStackSlotOffset(); |
1584 const intptr_t dest_offset = destination.ToStackSlotOffset(); | 1595 const intptr_t dest_offset = destination.ToStackSlotOffset(); |
1585 ScratchRegisterScope tmp(this, kNoRegister); | 1596 ScratchRegisterScope tmp(this, kNoRegister); |
1586 __ LoadFromOffset(tmp.reg(), source.base_reg(), source_offset, PP); | 1597 __ LoadFromOffset(tmp.reg(), source.base_reg(), source_offset); |
1587 __ StoreToOffset(tmp.reg(), destination.base_reg(), dest_offset, PP); | 1598 __ StoreToOffset(tmp.reg(), destination.base_reg(), dest_offset); |
1588 } | 1599 } |
1589 } else if (source.IsFpuRegister()) { | 1600 } else if (source.IsFpuRegister()) { |
1590 if (destination.IsFpuRegister()) { | 1601 if (destination.IsFpuRegister()) { |
1591 __ vmov(destination.fpu_reg(), source.fpu_reg()); | 1602 __ vmov(destination.fpu_reg(), source.fpu_reg()); |
1592 } else { | 1603 } else { |
1593 if (destination.IsDoubleStackSlot()) { | 1604 if (destination.IsDoubleStackSlot()) { |
1594 const intptr_t dest_offset = destination.ToStackSlotOffset(); | 1605 const intptr_t dest_offset = destination.ToStackSlotOffset(); |
1595 VRegister src = source.fpu_reg(); | 1606 VRegister src = source.fpu_reg(); |
1596 __ StoreDToOffset(src, destination.base_reg(), dest_offset, PP); | 1607 __ StoreDToOffset(src, destination.base_reg(), dest_offset); |
1597 } else { | 1608 } else { |
1598 ASSERT(destination.IsQuadStackSlot()); | 1609 ASSERT(destination.IsQuadStackSlot()); |
1599 const intptr_t dest_offset = destination.ToStackSlotOffset(); | 1610 const intptr_t dest_offset = destination.ToStackSlotOffset(); |
1600 __ StoreQToOffset( | 1611 __ StoreQToOffset( |
1601 source.fpu_reg(), destination.base_reg(), dest_offset, PP); | 1612 source.fpu_reg(), destination.base_reg(), dest_offset); |
1602 } | 1613 } |
1603 } | 1614 } |
1604 } else if (source.IsDoubleStackSlot()) { | 1615 } else if (source.IsDoubleStackSlot()) { |
1605 if (destination.IsFpuRegister()) { | 1616 if (destination.IsFpuRegister()) { |
1606 const intptr_t source_offset = source.ToStackSlotOffset(); | 1617 const intptr_t source_offset = source.ToStackSlotOffset(); |
1607 const VRegister dst = destination.fpu_reg(); | 1618 const VRegister dst = destination.fpu_reg(); |
1608 __ LoadDFromOffset(dst, source.base_reg(), source_offset, PP); | 1619 __ LoadDFromOffset(dst, source.base_reg(), source_offset); |
1609 } else { | 1620 } else { |
1610 ASSERT(destination.IsDoubleStackSlot()); | 1621 ASSERT(destination.IsDoubleStackSlot()); |
1611 const intptr_t source_offset = source.ToStackSlotOffset(); | 1622 const intptr_t source_offset = source.ToStackSlotOffset(); |
1612 const intptr_t dest_offset = destination.ToStackSlotOffset(); | 1623 const intptr_t dest_offset = destination.ToStackSlotOffset(); |
1613 __ LoadDFromOffset(VTMP, source.base_reg(), source_offset, PP); | 1624 __ LoadDFromOffset(VTMP, source.base_reg(), source_offset); |
1614 __ StoreDToOffset(VTMP, destination.base_reg(), dest_offset, PP); | 1625 __ StoreDToOffset(VTMP, destination.base_reg(), dest_offset); |
1615 } | 1626 } |
1616 } else if (source.IsQuadStackSlot()) { | 1627 } else if (source.IsQuadStackSlot()) { |
1617 if (destination.IsFpuRegister()) { | 1628 if (destination.IsFpuRegister()) { |
1618 const intptr_t source_offset = source.ToStackSlotOffset(); | 1629 const intptr_t source_offset = source.ToStackSlotOffset(); |
1619 __ LoadQFromOffset( | 1630 __ LoadQFromOffset( |
1620 destination.fpu_reg(), source.base_reg(), source_offset, PP); | 1631 destination.fpu_reg(), source.base_reg(), source_offset); |
1621 } else { | 1632 } else { |
1622 ASSERT(destination.IsQuadStackSlot()); | 1633 ASSERT(destination.IsQuadStackSlot()); |
1623 const intptr_t source_offset = source.ToStackSlotOffset(); | 1634 const intptr_t source_offset = source.ToStackSlotOffset(); |
1624 const intptr_t dest_offset = destination.ToStackSlotOffset(); | 1635 const intptr_t dest_offset = destination.ToStackSlotOffset(); |
1625 __ LoadQFromOffset(VTMP, source.base_reg(), source_offset, PP); | 1636 __ LoadQFromOffset(VTMP, source.base_reg(), source_offset); |
1626 __ StoreQToOffset(VTMP, destination.base_reg(), dest_offset, PP); | 1637 __ StoreQToOffset(VTMP, destination.base_reg(), dest_offset); |
1627 } | 1638 } |
1628 } else { | 1639 } else { |
1629 ASSERT(source.IsConstant()); | 1640 ASSERT(source.IsConstant()); |
1630 const Object& constant = source.constant(); | 1641 const Object& constant = source.constant(); |
1631 if (destination.IsRegister()) { | 1642 if (destination.IsRegister()) { |
1632 if (constant.IsSmi() && | 1643 if (constant.IsSmi() && |
1633 (source.constant_instruction()->representation() == kUnboxedInt32)) { | 1644 (source.constant_instruction()->representation() == kUnboxedInt32)) { |
1634 __ LoadImmediate(destination.reg(), | 1645 __ LoadImmediate(destination.reg(), |
1635 static_cast<int32_t>(Smi::Cast(constant).Value()), | 1646 static_cast<int32_t>(Smi::Cast(constant).Value())); |
1636 PP); | |
1637 } else { | 1647 } else { |
1638 __ LoadObject(destination.reg(), constant, PP); | 1648 __ LoadObject(destination.reg(), constant); |
1639 } | 1649 } |
1640 } else if (destination.IsFpuRegister()) { | 1650 } else if (destination.IsFpuRegister()) { |
1641 const VRegister dst = destination.fpu_reg(); | 1651 const VRegister dst = destination.fpu_reg(); |
1642 if (Utils::DoublesBitEqual(Double::Cast(constant).value(), 0.0)) { | 1652 if (Utils::DoublesBitEqual(Double::Cast(constant).value(), 0.0)) { |
1643 __ veor(dst, dst, dst); | 1653 __ veor(dst, dst, dst); |
1644 } else { | 1654 } else { |
1645 ScratchRegisterScope tmp(this, kNoRegister); | 1655 ScratchRegisterScope tmp(this, kNoRegister); |
1646 __ LoadObject(tmp.reg(), constant, PP); | 1656 __ LoadObject(tmp.reg(), constant); |
1647 __ LoadDFieldFromOffset(dst, tmp.reg(), Double::value_offset(), PP); | 1657 __ LoadDFieldFromOffset(dst, tmp.reg(), Double::value_offset()); |
1648 } | 1658 } |
1649 } else if (destination.IsDoubleStackSlot()) { | 1659 } else if (destination.IsDoubleStackSlot()) { |
1650 if (Utils::DoublesBitEqual(Double::Cast(constant).value(), 0.0)) { | 1660 if (Utils::DoublesBitEqual(Double::Cast(constant).value(), 0.0)) { |
1651 __ veor(VTMP, VTMP, VTMP); | 1661 __ veor(VTMP, VTMP, VTMP); |
1652 } else { | 1662 } else { |
1653 ScratchRegisterScope tmp(this, kNoRegister); | 1663 ScratchRegisterScope tmp(this, kNoRegister); |
1654 __ LoadObject(tmp.reg(), constant, PP); | 1664 __ LoadObject(tmp.reg(), constant); |
1655 __ LoadDFieldFromOffset(VTMP, tmp.reg(), Double::value_offset(), PP); | 1665 __ LoadDFieldFromOffset(VTMP, tmp.reg(), Double::value_offset()); |
1656 } | 1666 } |
1657 const intptr_t dest_offset = destination.ToStackSlotOffset(); | 1667 const intptr_t dest_offset = destination.ToStackSlotOffset(); |
1658 __ StoreDToOffset(VTMP, destination.base_reg(), dest_offset, PP); | 1668 __ StoreDToOffset(VTMP, destination.base_reg(), dest_offset); |
1659 } else { | 1669 } else { |
1660 ASSERT(destination.IsStackSlot()); | 1670 ASSERT(destination.IsStackSlot()); |
1661 const intptr_t dest_offset = destination.ToStackSlotOffset(); | 1671 const intptr_t dest_offset = destination.ToStackSlotOffset(); |
1662 ScratchRegisterScope tmp(this, kNoRegister); | 1672 ScratchRegisterScope tmp(this, kNoRegister); |
1663 if (constant.IsSmi() && | 1673 if (constant.IsSmi() && |
1664 (source.constant_instruction()->representation() == kUnboxedInt32)) { | 1674 (source.constant_instruction()->representation() == kUnboxedInt32)) { |
1665 __ LoadImmediate(tmp.reg(), | 1675 __ LoadImmediate(tmp.reg(), |
1666 static_cast<int32_t>(Smi::Cast(constant).Value()), | 1676 static_cast<int32_t>(Smi::Cast(constant).Value())); |
1667 PP); | |
1668 } else { | 1677 } else { |
1669 __ LoadObject(tmp.reg(), constant, PP); | 1678 __ LoadObject(tmp.reg(), constant); |
1670 } | 1679 } |
1671 __ StoreToOffset(tmp.reg(), destination.base_reg(), dest_offset, PP); | 1680 __ StoreToOffset(tmp.reg(), destination.base_reg(), dest_offset); |
1672 } | 1681 } |
1673 } | 1682 } |
1674 | 1683 |
1675 move->Eliminate(); | 1684 move->Eliminate(); |
1676 } | 1685 } |
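In the constant case above, the interesting decision is how a Smi constant is materialized: when the consuming instruction expects kUnboxedInt32 the raw integer is loaded as an immediate, otherwise the tagged object is loaded. A rough sketch of that decision, assuming the conventional one-bit Smi tag (the helper name is illustrative, not VM code):

#include <cstdint>

static int64_t MaterializedSmiBits(int64_t smi_value, bool wants_unboxed_int32) {
  if (wants_unboxed_int32) {
    // LoadImmediate path: the untagged 32-bit value.
    return static_cast<int32_t>(smi_value);
  }
  // LoadObject path: the tagged Smi representation.
  return smi_value << 1;
}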
1677 | 1686 |
1678 | 1687 |
1679 void ParallelMoveResolver::EmitSwap(int index) { | 1688 void ParallelMoveResolver::EmitSwap(int index) { |
1680 MoveOperands* move = moves_[index]; | 1689 MoveOperands* move = moves_[index]; |
1681 const Location source = move->src(); | 1690 const Location source = move->src(); |
(...skipping 30 matching lines...)
1712 VRegister reg = source.IsFpuRegister() ? source.fpu_reg() | 1721 VRegister reg = source.IsFpuRegister() ? source.fpu_reg() |
1713 : destination.fpu_reg(); | 1722 : destination.fpu_reg(); |
1714 Register base_reg = source.IsFpuRegister() | 1723 Register base_reg = source.IsFpuRegister() |
1715 ? destination.base_reg() | 1724 ? destination.base_reg() |
1716 : source.base_reg(); | 1725 : source.base_reg(); |
1717 const intptr_t slot_offset = source.IsFpuRegister() | 1726 const intptr_t slot_offset = source.IsFpuRegister() |
1718 ? destination.ToStackSlotOffset() | 1727 ? destination.ToStackSlotOffset() |
1719 : source.ToStackSlotOffset(); | 1728 : source.ToStackSlotOffset(); |
1720 | 1729 |
1721 if (double_width) { | 1730 if (double_width) { |
1722 __ LoadDFromOffset(VTMP, base_reg, slot_offset, PP); | 1731 __ LoadDFromOffset(VTMP, base_reg, slot_offset); |
1723 __ StoreDToOffset(reg, base_reg, slot_offset, PP); | 1732 __ StoreDToOffset(reg, base_reg, slot_offset); |
1724 __ fmovdd(reg, VTMP); | 1733 __ fmovdd(reg, VTMP); |
1725 } else { | 1734 } else { |
1726 __ LoadQFromOffset(VTMP, base_reg, slot_offset, PP); | 1735 __ LoadQFromOffset(VTMP, base_reg, slot_offset); |
1727 __ StoreQToOffset(reg, base_reg, slot_offset, PP); | 1736 __ StoreQToOffset(reg, base_reg, slot_offset); |
1728 __ vmov(reg, VTMP); | 1737 __ vmov(reg, VTMP); |
1729 } | 1738 } |
1730 } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) { | 1739 } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) { |
1731 const intptr_t source_offset = source.ToStackSlotOffset(); | 1740 const intptr_t source_offset = source.ToStackSlotOffset(); |
1732 const intptr_t dest_offset = destination.ToStackSlotOffset(); | 1741 const intptr_t dest_offset = destination.ToStackSlotOffset(); |
1733 | 1742 |
1734 ScratchFpuRegisterScope ensure_scratch(this, kNoFpuRegister); | 1743 ScratchFpuRegisterScope ensure_scratch(this, kNoFpuRegister); |
1735 VRegister scratch = ensure_scratch.reg(); | 1744 VRegister scratch = ensure_scratch.reg(); |
1736 __ LoadDFromOffset(VTMP, source.base_reg(), source_offset, PP); | 1745 __ LoadDFromOffset(VTMP, source.base_reg(), source_offset); |
1737 __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset, PP); | 1746 __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset); |
1738 __ StoreDToOffset(VTMP, destination.base_reg(), dest_offset, PP); | 1747 __ StoreDToOffset(VTMP, destination.base_reg(), dest_offset); |
1739 __ StoreDToOffset(scratch, source.base_reg(), source_offset, PP); | 1748 __ StoreDToOffset(scratch, source.base_reg(), source_offset); |
1740 } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) { | 1749 } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) { |
1741 const intptr_t source_offset = source.ToStackSlotOffset(); | 1750 const intptr_t source_offset = source.ToStackSlotOffset(); |
1742 const intptr_t dest_offset = destination.ToStackSlotOffset(); | 1751 const intptr_t dest_offset = destination.ToStackSlotOffset(); |
1743 | 1752 |
1744 ScratchFpuRegisterScope ensure_scratch(this, kNoFpuRegister); | 1753 ScratchFpuRegisterScope ensure_scratch(this, kNoFpuRegister); |
1745 VRegister scratch = ensure_scratch.reg(); | 1754 VRegister scratch = ensure_scratch.reg(); |
1746 __ LoadQFromOffset(VTMP, source.base_reg(), source_offset, PP); | 1755 __ LoadQFromOffset(VTMP, source.base_reg(), source_offset); |
1747 __ LoadQFromOffset(scratch, destination.base_reg(), dest_offset, PP); | 1756 __ LoadQFromOffset(scratch, destination.base_reg(), dest_offset); |
1748 __ StoreQToOffset(VTMP, destination.base_reg(), dest_offset, PP); | 1757 __ StoreQToOffset(VTMP, destination.base_reg(), dest_offset); |
1749 __ StoreQToOffset(scratch, source.base_reg(), source_offset, PP); | 1758 __ StoreQToOffset(scratch, source.base_reg(), source_offset); |
1750 } else { | 1759 } else { |
1751 UNREACHABLE(); | 1760 UNREACHABLE(); |
1752 } | 1761 } |
1753 | 1762 |
1754 // The swap of source and destination has executed a move from source to | 1763 // The swap of source and destination has executed a move from source to |
1755 // destination. | 1764 // destination. |
1756 move->Eliminate(); | 1765 move->Eliminate(); |
1757 | 1766 |
1758 // Any unperformed (including pending) move with a source of either | 1767 // Any unperformed (including pending) move with a source of either |
1759 // this move's source or destination needs to have their source | 1768 // this move's source or destination needs to have their source |
(...skipping 32 matching lines...)
1792 void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) { | 1801 void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) { |
1793 UNREACHABLE(); | 1802 UNREACHABLE(); |
1794 } | 1803 } |
1795 | 1804 |
1796 | 1805 |
1797 void ParallelMoveResolver::Exchange(Register reg, | 1806 void ParallelMoveResolver::Exchange(Register reg, |
1798 Register base_reg, | 1807 Register base_reg, |
1799 intptr_t stack_offset) { | 1808 intptr_t stack_offset) { |
1800 ScratchRegisterScope tmp(this, reg); | 1809 ScratchRegisterScope tmp(this, reg); |
1801 __ mov(tmp.reg(), reg); | 1810 __ mov(tmp.reg(), reg); |
1802 __ LoadFromOffset(reg, base_reg, stack_offset, PP); | 1811 __ LoadFromOffset(reg, base_reg, stack_offset); |
1803 __ StoreToOffset(tmp.reg(), base_reg, stack_offset, PP); | 1812 __ StoreToOffset(tmp.reg(), base_reg, stack_offset); |
1804 } | 1813 } |
1805 | 1814 |
1806 | 1815 |
1807 void ParallelMoveResolver::Exchange(Register base_reg1, | 1816 void ParallelMoveResolver::Exchange(Register base_reg1, |
1808 intptr_t stack_offset1, | 1817 intptr_t stack_offset1, |
1809 Register base_reg2, | 1818 Register base_reg2, |
1810 intptr_t stack_offset2) { | 1819 intptr_t stack_offset2) { |
1811 ScratchRegisterScope tmp1(this, kNoRegister); | 1820 ScratchRegisterScope tmp1(this, kNoRegister); |
1812 ScratchRegisterScope tmp2(this, tmp1.reg()); | 1821 ScratchRegisterScope tmp2(this, tmp1.reg()); |
1813 __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1, PP); | 1822 __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1); |
1814 __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2, PP); | 1823 __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2); |
1815 __ StoreToOffset(tmp1.reg(), base_reg2, stack_offset2, PP); | 1824 __ StoreToOffset(tmp1.reg(), base_reg2, stack_offset2); |
1816 __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1, PP); | 1825 __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1); |
1817 } | 1826 } |
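Both Exchange overloads above are straightforward swaps through scratch registers. Modelled over word-sized memory (base register plus offset shown as a pointer), they are equivalent to the following sketch; the function names are illustrative only:

#include <cstdint>

static void ExchangeRegAndSlot(int64_t* reg, int64_t* slot) {
  int64_t tmp = *reg;  // mov(tmp, reg)
  *reg = *slot;        // LoadFromOffset(reg, base_reg, stack_offset)
  *slot = tmp;         // StoreToOffset(tmp, base_reg, stack_offset)
}

static void ExchangeSlots(int64_t* slot1, int64_t* slot2) {
  int64_t tmp1 = *slot1;  // LoadFromOffset(tmp1, base_reg1, stack_offset1)
  int64_t tmp2 = *slot2;  // LoadFromOffset(tmp2, base_reg2, stack_offset2)
  *slot2 = tmp1;          // StoreToOffset(tmp1, base_reg2, stack_offset2)
  *slot1 = tmp2;          // StoreToOffset(tmp2, base_reg1, stack_offset1)
}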
1818 | 1827 |
1819 | 1828 |
1820 void ParallelMoveResolver::SpillScratch(Register reg) { | 1829 void ParallelMoveResolver::SpillScratch(Register reg) { |
1821 __ Push(reg); | 1830 __ Push(reg); |
1822 } | 1831 } |
1823 | 1832 |
1824 | 1833 |
1825 void ParallelMoveResolver::RestoreScratch(Register reg) { | 1834 void ParallelMoveResolver::RestoreScratch(Register reg) { |
1826 __ Pop(reg); | 1835 __ Pop(reg); |
1827 } | 1836 } |
1828 | 1837 |
1829 | 1838 |
1830 void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) { | 1839 void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) { |
1831 __ PushDouble(reg); | 1840 __ PushDouble(reg); |
1832 } | 1841 } |
1833 | 1842 |
1834 | 1843 |
1835 void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) { | 1844 void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) { |
1836 __ PopDouble(reg); | 1845 __ PopDouble(reg); |
1837 } | 1846 } |
1838 | 1847 |
1839 | 1848 |
1840 #undef __ | 1849 #undef __ |
1841 | 1850 |
1842 } // namespace dart | 1851 } // namespace dart |
1843 | 1852 |
1844 #endif // defined TARGET_ARCH_ARM64 | 1853 #endif // defined TARGET_ARCH_ARM64 |