OLD | NEW |
1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" | 5 #include "vm/globals.h" |
6 #if defined(TARGET_ARCH_ARM64) | 6 #if defined(TARGET_ARCH_ARM64) |
7 | 7 |
8 #include "vm/assembler.h" | 8 #include "vm/assembler.h" |
9 #include "vm/code_generator.h" | 9 #include "vm/code_generator.h" |
10 #include "vm/compiler.h" | 10 #include "vm/compiler.h" |
(...skipping 361 matching lines...)
372 void StubCode::GenerateDeoptimizeStub(Assembler* assembler) { | 372 void StubCode::GenerateDeoptimizeStub(Assembler* assembler) { |
373 __ Stop("GenerateDeoptimizeStub"); | 373 __ Stop("GenerateDeoptimizeStub"); |
374 } | 374 } |
375 | 375 |
376 | 376 |
377 void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) { | 377 void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) { |
378 __ Stop("GenerateMegamorphicMissStub"); | 378 __ Stop("GenerateMegamorphicMissStub"); |
379 } | 379 } |
380 | 380 |
381 | 381 |
| 382 // Called for inline allocation of arrays. |
| 383 // Input parameters: |
| 384 // LR: return address. |
| 385 // R2: array length as Smi. |
| 386 // R1: array element type (either NULL or an instantiated type). |
| 387 // NOTE: R2 cannot be clobbered here as the caller relies on it being saved. |
| 388 // The newly allocated object is returned in R0. |
382 void StubCode::GenerateAllocateArrayStub(Assembler* assembler) { | 389 void StubCode::GenerateAllocateArrayStub(Assembler* assembler) { |
383 __ Stop("GenerateAllocateArrayStub"); | 390 Label slow_case; |
| 391 if (FLAG_inline_alloc) { |
| 392 // Compute the size to be allocated; it is based on the array length |
| 393 // and is computed as: |
| 394 // RoundedAllocationSize((array_length * kWordSize) + sizeof(RawArray)). |
| 395 // Check that length is a Smi. |
| 396 __ tsti(R2, kSmiTagMask); |
| 397 if (FLAG_use_slow_path) { |
| 398 __ b(&slow_case); |
| 399 } else { |
| 400 __ b(&slow_case, NE); |
| 401 } |
| 402 __ LoadFieldFromOffset(R8, CTX, Context::isolate_offset()); |
| 403 __ LoadFromOffset(R8, R8, Isolate::heap_offset()); |
| 404 __ LoadFromOffset(R8, R8, Heap::new_space_offset()); |
| 405 |
| 406 // Calculate and align allocation size. |
| 407 // Load new object start and calculate next object start. |
| 408 // R1: array element type. |
| 409 // R2: array length as Smi. |
| 410 // R8: points to new space object. |
| 411 __ LoadFromOffset(R0, R8, Scavenger::top_offset()); |
| 412 intptr_t fixed_size = sizeof(RawArray) + kObjectAlignment - 1; |
| 413 __ LoadImmediate(R3, fixed_size, kNoPP); |
| 414 __ add(R3, R3, Operand(R2, LSL, 2)); // R2 is Smi (length << 1); LSL 2 scales by kWordSize. |
| 415 ASSERT(kSmiTagShift == 1); |
| 416 __ andi(R3, R3, ~(kObjectAlignment - 1)); |
| 417 __ add(R7, R3, Operand(R0)); |
| 418 |
| 419 // Check if the allocation fits into the remaining space. |
| 420 // R0: potential new object start. |
| 421 // R1: array element type. |
| 422 // R2: array length as Smi. |
| 423 // R3: array size. |
| 424 // R7: potential next object start. |
| 425 // R8: points to new space object. |
| 426 __ LoadFromOffset(TMP, R8, Scavenger::end_offset()); |
| 427 __ CompareRegisters(R7, TMP); |
| 428 __ b(&slow_case, CS); // Branch if unsigned higher or equal. |
| 429 |
| 430 // Successfully allocated the object(s), now update top to point to |
| 431 // next object start and initialize the object. |
| 432 // R0: potential new object start. |
| 433 // R3: array size. |
| 434 // R7: potential next object start. |
| 435 // R8: Points to new space object. |
| 436 __ StoreToOffset(R7, R8, Scavenger::top_offset()); |
| 437 __ add(R0, R0, Operand(kHeapObjectTag)); |
| 438 __ UpdateAllocationStatsWithSize(kArrayCid, R3, R8, kNoPP); |
| 439 |
| 440 // R0: new object start as a tagged pointer. |
| 441 // R1: array element type. |
| 442 // R2: array length as Smi. |
| 443 // R3: array size. |
| 444 // R7: new object end address. |
| 445 |
| 446 // Store the type argument field. |
| 447 __ StoreIntoObjectNoBarrier( |
| 448 R0, |
| 449 FieldAddress(R0, Array::type_arguments_offset()), |
| 450 R1); |
| 451 |
| 452 // Set the length field. |
| 453 __ StoreIntoObjectNoBarrier( |
| 454 R0, |
| 455 FieldAddress(R0, Array::length_offset()), |
| 456 R2); |
| 457 |
| 458 // Calculate the size tag. |
| 459 // R0: new object start as a tagged pointer. |
| 460 // R2: array length as Smi. |
| 461 // R3: array size. |
| 462 // R7: new object end address. |
| 463 const intptr_t shift = RawObject::kSizeTagBit - kObjectAlignmentLog2; |
| 464 __ CompareImmediate(R3, RawObject::SizeTag::kMaxSizeTag, kNoPP); |
| 465 // If the size fits in the size tag, set R1 to the shifted size, else to zero. |
| 466 __ Lsl(TMP, R3, shift); |
| 467 __ csel(R1, TMP, R1, LS); |
| 468 __ csel(R1, ZR, R1, HI); |
| 469 |
| 470 // Get the class index and insert it into the tags. |
| 471 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(kArrayCid), kNoPP); |
| 472 __ orr(R1, R1, Operand(TMP)); |
| 473 __ StoreFieldToOffset(R1, R0, Array::tags_offset()); |
| 474 |
| 475 // Initialize all array elements to raw_null. |
| 476 // R0: new object start as a tagged pointer. |
| 477 // R7: new object end address. |
| 478 // R2: array length as Smi. |
| 479 __ AddImmediate(R1, R0, Array::data_offset() - kHeapObjectTag, kNoPP); |
| 480 // R1: iterator which initially points to the start of the variable |
| 481 // data area to be initialized. |
| 482 __ LoadObject(TMP, Object::null_object(), PP); |
| 483 Label loop, done; |
| 484 __ Bind(&loop); |
| 485 // TODO(cshapiro): StoreIntoObjectNoBarrier |
| 486 __ CompareRegisters(R1, R7); |
| 487 __ b(&done, CS); |
| 488 __ str(TMP, Address(R1)); // Reached only while R1 < R7 (unsigned). |
| 489 __ AddImmediate(R1, R1, kWordSize, kNoPP); |
| 490 __ b(&loop); // Loop until R1 == R7. |
| 491 __ Bind(&done); |
| 492 |
| 493 // Done allocating and initializing the array. |
| 494 // R0: new object. |
| 495 // R2: array length as Smi (preserved for the caller). |
| 496 __ ret(); |
| 497 } |
| 498 |
| 499 // Unable to allocate the array using the fast inline code; just call |
| 500 // into the runtime. |
| 501 __ Bind(&slow_case); |
| 502 // Create a stub frame as we are pushing some objects on the stack before |
| 503 // calling into the runtime. |
| 504 __ EnterStubFrame(); |
| 505 __ LoadObject(TMP, Object::null_object(), PP); |
| 506 // Set up space on the stack for the return value. |
| 507 // Push the array length (as a Smi) and the element type. |
| 508 __ Push(TMP); |
| 509 __ Push(R2); |
| 510 __ Push(R1); |
| 511 __ CallRuntime(kAllocateArrayRuntimeEntry, 2); |
| 512 // Pop arguments; the result is popped into TMP. |
| 513 __ Pop(R1); |
| 514 __ Pop(R2); |
| 515 __ Pop(TMP); |
| 516 __ mov(R0, TMP); |
| 517 __ LeaveStubFrame(); |
| 518 __ ret(); |
384 } | 519 } |
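The fast path above reduces to plain integer arithmetic: round the requested size up to the object alignment, then fold the rounded size and class id into the header tags. A minimal host-side sketch of that arithmetic, assuming 64-bit words, 16-byte object alignment, and an illustrative size-tag layout (kSizeTagBit, kMaxSizeTag, and the RawArray header size below are assumptions for illustration, not the VM's definitions):

    // Host-side sketch only; all constants are assumptions for illustration.
    #include <cstdint>
    #include <cstdio>

    const int64_t kWordSize = 8;                     // assumed 64-bit target
    const int64_t kObjectAlignment = 16;             // assumed 2 * kWordSize
    const int64_t kObjectAlignmentLog2 = 4;
    const int64_t kSizeTagBit = 8;                   // assumed tag layout
    const int64_t kMaxSizeTag = ((1 << 8) - 1) << kObjectAlignmentLog2;
    const int64_t kArrayHeaderSize = 3 * kWordSize;  // stand-in for sizeof(RawArray)

    // RoundedAllocationSize((array_length * kWordSize) + sizeof(RawArray)),
    // computed the way the stub does: add the alignment slop up front,
    // then mask it off.
    int64_t ArrayAllocationSize(int64_t length) {
      const int64_t fixed_size = kArrayHeaderSize + kObjectAlignment - 1;
      return (fixed_size + length * kWordSize) & ~(kObjectAlignment - 1);
    }

    // Mirrors the csel pair: the shifted size if it fits in the tag field,
    // else zero.
    int64_t SizeTag(int64_t size) {
      const int64_t shift = kSizeTagBit - kObjectAlignmentLog2;
      return (size <= kMaxSizeTag) ? (size << shift) : 0;
    }

    int main() {
      const int64_t size = ArrayAllocationSize(4);  // 24-byte header + 32 bytes -> 64
      printf("size=%lld tag=%lld\n", (long long)size, (long long)SizeTag(size));
      return 0;
    }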
385 | 520 |
386 | 521 |
387 // Called when invoking Dart code from C++ (VM code). | 522 // Called when invoking Dart code from C++ (VM code). |
388 // Input parameters: | 523 // Input parameters: |
389 // LR : points to return address. | 524 // LR : points to return address. |
390 // R0 : entrypoint of the Dart function to call. | 525 // R0 : entrypoint of the Dart function to call. |
391 // R1 : arguments descriptor array. | 526 // R1 : arguments descriptor array. |
392 // R2 : arguments array. | 527 // R2 : arguments array. |
393 // R3 : new context containing the current isolate pointer. | 528 // R3 : new context containing the current isolate pointer. |
(...skipping 134 matching lines...)
528 } | 663 } |
529 | 664 |
530 // TODO(zra): Restore callee-saved fpu registers. | 665 // TODO(zra): Restore callee-saved fpu registers. |
531 | 666 |
532 // Restore the frame pointer and return. | 667 // Restore the frame pointer and return. |
533 __ LeaveFrame(); | 668 __ LeaveFrame(); |
534 __ ret(); | 669 __ ret(); |
535 } | 670 } |
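Per the input-parameter comments above, the stub is entered with its four operands in R0-R3, which is where the AAPCS64 calling convention places the first four arguments of a C function. A hedged sketch of how the VM side might type the entry point; the names and types here are placeholders, not the VM's declarations:

    #include <cstdint>

    // Hypothetical signature for the generated stub's entry point; the
    // parameter order mirrors the register comments above (R0-R3).
    typedef int64_t (*InvokeDartCodeStub)(uintptr_t entry_point,       // R0
                                          void* arguments_descriptor,  // R1
                                          void* arguments,             // R2
                                          void* new_context);          // R3

    // Usage sketch: cast the generated stub's address and call it.
    //   InvokeDartCodeStub invoke =
    //       reinterpret_cast<InvokeDartCodeStub>(stub_entry);
    //   int64_t result = invoke(fn_entry, args_desc, args_array, ctx);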
536 | 671 |
537 | 672 |
| 673 // Called for inline allocation of contexts. |
| 674 // Input: |
| 675 // R1: number of context variables. |
| 676 // Output: |
| 677 // R0: newly allocated RawContext object. |
538 void StubCode::GenerateAllocateContextStub(Assembler* assembler) { | 678 void StubCode::GenerateAllocateContextStub(Assembler* assembler) { |
539 __ Stop("GenerateAllocateContextStub"); | 679 if (FLAG_inline_alloc) { |
| 680 const Class& context_class = Class::ZoneHandle(Object::context_class()); |
| 681 Label slow_case; |
| 682 Heap* heap = Isolate::Current()->heap(); |
| 683 // First compute the rounded instance size. |
| 684 // R1: number of context variables. |
| 685 intptr_t fixed_size = sizeof(RawContext) + kObjectAlignment - 1; |
| 686 __ LoadImmediate(R2, fixed_size, kNoPP); |
| 687 __ add(R2, R2, Operand(R1, LSL, 3)); // R1 is untagged; scale by kWordSize. |
| 688 ASSERT(kSmiTagShift == 1); |
| 689 __ andi(R2, R2, ~(kObjectAlignment - 1)); |
| 690 |
| 691 // Now allocate the object. |
| 692 // R1: number of context variables. |
| 693 // R2: object size. |
| 694 __ LoadImmediate(R5, heap->TopAddress(), kNoPP); |
| 695 __ ldr(R0, Address(R5)); |
| 696 __ add(R3, R2, Operand(R0)); |
| 697 // Check if the allocation fits into the remaining space. |
| 698 // R0: potential new object. |
| 699 // R1: number of context variables. |
| 700 // R2: object size. |
| 701 // R3: potential next object start. |
| 702 __ LoadImmediate(TMP, heap->EndAddress(), kNoPP); |
| 703 __ ldr(TMP, Address(TMP)); |
| 704 __ CompareRegisters(R3, TMP); |
| 705 if (FLAG_use_slow_path) { |
| 706 __ b(&slow_case); |
| 707 } else { |
| 708 __ b(&slow_case, CS); // Branch if unsigned higher or equal. |
| 709 } |
| 710 |
| 711 // Successfully allocated the object, now update top to point to |
| 712 // next object start and initialize the object. |
| 713 // R0: new object. |
| 714 // R1: number of context variables. |
| 715 // R2: object size. |
| 716 // R3: next object start. |
| 717 __ str(R3, Address(R5)); |
| 718 __ add(R0, R0, Operand(kHeapObjectTag)); |
| 719 __ UpdateAllocationStatsWithSize(context_class.id(), R2, R5, kNoPP); |
| 720 |
| 721 // Calculate the size tag. |
| 722 // R0: new object. |
| 723 // R1: number of context variables. |
| 724 // R2: object size. |
| 725 const intptr_t shift = RawObject::kSizeTagBit - kObjectAlignmentLog2; |
| 726 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag, kNoPP); |
| 727 // If the size fits in the size tag, set R2 to the shifted size, else to zero. |
| 728 __ Lsl(TMP, R2, shift); |
| 729 __ csel(R2, TMP, R2, LS); |
| 730 __ csel(R2, ZR, R2, HI); |
| 731 |
| 732 // Get the class index and insert it into the tags. |
| 733 // R2: size and bit tags. |
| 734 __ LoadImmediate( |
| 735 TMP, RawObject::ClassIdTag::encode(context_class.id()), kNoPP); |
| 736 __ orr(R2, R2, Operand(TMP)); |
| 737 __ StoreFieldToOffset(R2, R0, Context::tags_offset()); |
| 738 |
| 739 // Set up the number of context variables field. |
| 740 // R0: new object. |
| 741 // R1: number of context variables as integer value (not object). |
| 742 __ StoreFieldToOffset(R1, R0, Context::num_variables_offset()); |
| 743 |
| 744 // Set up the isolate field. |
| 745 // Load Isolate pointer from Context structure into R2. |
| 746 // R0: new object. |
| 747 // R1: number of context variables. |
| 748 __ LoadFieldFromOffset(R2, CTX, Context::isolate_offset()); |
| 749 // R2: isolate, not an object. |
| 750 __ StoreFieldToOffset(R2, R0, Context::isolate_offset()); |
| 751 |
| 752 // Set up the parent field. |
| 753 // R0: new object. |
| 754 // R1: number of context variables. |
| 755 __ LoadObject(R2, Object::null_object(), PP); |
| 756 __ StoreFieldToOffset(R2, R0, Context::parent_offset()); |
| 757 |
| 758 // Initialize the context variables. |
| 759 // R0: new object. |
| 760 // R1: number of context variables. |
| 761 // R2: raw null. |
| 762 Label loop, done; |
| 763 __ AddImmediate( |
| 764 R3, R0, Context::variable_offset(0) - kHeapObjectTag, kNoPP); |
| 765 __ Bind(&loop); |
| 766 __ subs(R1, R1, Operand(1)); |
| 767 __ b(&done, MI); |
| 768 __ str(R2, Address(R3, R1, UXTX, Address::Scaled)); |
| 769 __ b(&loop, NE); // Loop if R1 not zero. |
| 770 __ Bind(&done); |
| 771 |
| 772 // Done allocating and initializing the context. |
| 773 // R0: new object. |
| 774 __ ret(); |
| 775 |
| 776 __ Bind(&slow_case); |
| 777 } |
| 778 // Create a stub frame as we are pushing some objects on the stack before |
| 779 // calling into the runtime. |
| 780 __ EnterStubFrame(); |
| 781 // Set up space on the stack for the return value. |
| 782 __ LoadObject(R2, Object::null_object(), PP); |
| 783 __ SmiTag(R1); |
| 784 __ Push(R2); |
| 785 __ Push(R1); |
| 786 __ CallRuntime(kAllocateContextRuntimeEntry, 1); // Allocate context. |
| 787 __ Drop(1); // Pop number of context variables argument. |
| 788 __ Pop(R0); // Pop the new context object. |
| 789 // R0: new object. |
| 790 // Restore the frame pointer. |
| 791 __ LeaveStubFrame(); |
| 792 __ ret(); |
540 } | 793 } |
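Both allocation stubs share the same bump-pointer fast path: compare the candidate object end against the space's end pointer, then either publish the new top or fall back to the runtime. A plain C++ sketch of that control flow, with a hypothetical NewSpace struct standing in for the scavenger's top/end words:

    #include <cstdint>

    // Stand-in for the scavenger's top/end words; not the VM's API.
    struct NewSpace {
      uintptr_t top;  // next free address
      uintptr_t end;  // end of the current allocation region
    };

    // Returns the untagged object start, or 0 to signal the slow path.
    uintptr_t TryAllocate(NewSpace* space, uintptr_t size) {
      const uintptr_t object = space->top;
      const uintptr_t next = object + size;
      if (next >= space->end) {  // mirrors b(&slow_case, CS): unsigned >=
        return 0;                // fall back to the runtime entry
      }
      space->top = next;         // publish the new top
      return object;             // caller adds kHeapObjectTag and writes tags
    }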
541 | 794 |
542 | 795 |
543 DECLARE_LEAF_RUNTIME_ENTRY(void, StoreBufferBlockProcess, Isolate* isolate); | 796 DECLARE_LEAF_RUNTIME_ENTRY(void, StoreBufferBlockProcess, Isolate* isolate); |
544 | 797 |
545 // Helper stub to implement Assembler::StoreIntoObject. | 798 // Helper stub to implement Assembler::StoreIntoObject. |
546 // Input parameters: | 799 // Input parameters: |
547 // R0: Address being stored | 800 // R0: Address being stored |
548 void StubCode::GenerateUpdateStoreBufferStub(Assembler* assembler) { | 801 void StubCode::GenerateUpdateStoreBufferStub(Assembler* assembler) { |
549 Label add_to_buffer; | 802 Label add_to_buffer; |
(...skipping 421 matching lines...)
971 | 1224 |
972 | 1225 |
973 void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) { | 1226 void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) { |
974 GenerateUsageCounterIncrement(assembler, R6); | 1227 GenerateUsageCounterIncrement(assembler, R6); |
975 GenerateNArgsCheckInlineCacheStub( | 1228 GenerateNArgsCheckInlineCacheStub( |
976 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry); | 1229 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry); |
977 } | 1230 } |
978 | 1231 |
979 | 1232 |
980 void StubCode::GenerateThreeArgsCheckInlineCacheStub(Assembler* assembler) { | 1233 void StubCode::GenerateThreeArgsCheckInlineCacheStub(Assembler* assembler) { |
981 __ Stop("GenerateThreeArgsCheckInlineCacheStub"); | 1234 GenerateUsageCounterIncrement(assembler, R6); |
| 1235 GenerateNArgsCheckInlineCacheStub( |
| 1236 assembler, 3, kInlineCacheMissHandlerThreeArgsRuntimeEntry); |
982 } | 1237 } |
983 | 1238 |
984 | 1239 |
985 void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub( | 1240 void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub( |
986 Assembler* assembler) { | 1241 Assembler* assembler) { |
987 __ Stop("GenerateOneArgOptimizedCheckInlineCacheStub"); | 1242 __ Stop("GenerateOneArgOptimizedCheckInlineCacheStub"); |
988 } | 1243 } |
989 | 1244 |
990 | 1245 |
991 void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub( | 1246 void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub( |
(...skipping 257 matching lines...)
1249 | 1504 |
1250 | 1505 |
1251 void StubCode::GenerateOptimizedIdenticalWithNumberCheckStub( | 1506 void StubCode::GenerateOptimizedIdenticalWithNumberCheckStub( |
1252 Assembler* assembler) { | 1507 Assembler* assembler) { |
1253 __ Stop("GenerateOptimizedIdenticalWithNumberCheckStub"); | 1508 __ Stop("GenerateOptimizedIdenticalWithNumberCheckStub"); |
1254 } | 1509 } |
1255 | 1510 |
1256 } // namespace dart | 1511 } // namespace dart |
1257 | 1512 |
1258 #endif // defined TARGET_ARCH_ARM64 | 1513 #endif // defined TARGET_ARCH_ARM64 |