Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(169)

Side by Side Diff: runtime/vm/stub_code_arm64.cc

Issue 254673007: More stubs and instructions for arm64. (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 6 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « runtime/vm/simulator_arm64.cc ('k') | runtime/vm/unit_test.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" 5 #include "vm/globals.h"
6 #if defined(TARGET_ARCH_ARM64) 6 #if defined(TARGET_ARCH_ARM64)
7 7
8 #include "vm/assembler.h" 8 #include "vm/assembler.h"
9 #include "vm/code_generator.h" 9 #include "vm/code_generator.h"
10 #include "vm/compiler.h" 10 #include "vm/compiler.h"
(...skipping 361 matching lines...) Expand 10 before | Expand all | Expand 10 after
// Placeholder: deoptimization is not yet implemented for arm64.
// Reaching this stub halts execution with a diagnostic message.
void StubCode::GenerateDeoptimizeStub(Assembler* assembler) {
  __ Stop("GenerateDeoptimizeStub");
}
375 375
376 376
// Placeholder: the megamorphic-call miss handler is not yet implemented
// for arm64. Reaching this stub halts execution with a diagnostic message.
void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) {
  __ Stop("GenerateMegamorphicMissStub");
}
380 380
381 381
382 // Called for inline allocation of arrays.
383 // Input parameters:
384 // LR: return address.
385 // R2: array length as Smi.
386 // R1: array element type (either NULL or an instantiated type).
387 // NOTE: R2 cannot be clobbered here as the caller relies on it being saved.
388 // The newly allocated object is returned in R0.
382 void StubCode::GenerateAllocateArrayStub(Assembler* assembler) { 389 void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
383 __ Stop("GenerateAllocateArrayStub"); 390 Label slow_case;
391 if (FLAG_inline_alloc) {
392 // Compute the size to be allocated, it is based on the array length
393 // and is computed as:
394 // RoundedAllocationSize((array_length * kwordSize) + sizeof(RawArray)).
395 // Assert that length is a Smi.
396 __ tsti(R2, kSmiTagMask);
397 if (FLAG_use_slow_path) {
398 __ b(&slow_case);
399 } else {
400 __ b(&slow_case, NE);
401 }
402 __ LoadFieldFromOffset(R8, CTX, Context::isolate_offset());
403 __ LoadFromOffset(R8, R8, Isolate::heap_offset());
404 __ LoadFromOffset(R8, R8, Heap::new_space_offset());
405
406 // Calculate and align allocation size.
407 // Load new object start and calculate next object start.
408 // R1: array element type.
409 // R2: array length as Smi.
410 // R8: points to new space object.
411 __ LoadFromOffset(R0, R8, Scavenger::top_offset());
412 intptr_t fixed_size = sizeof(RawArray) + kObjectAlignment - 1;
413 __ LoadImmediate(R3, fixed_size, kNoPP);
414 __ add(R3, R3, Operand(R2, LSL, 2)); // R2 is Smi.
415 ASSERT(kSmiTagShift == 1);
416 __ andi(R3, R3, ~(kObjectAlignment - 1));
417 __ add(R7, R3, Operand(R0));
418
419 // Check if the allocation fits into the remaining space.
420 // R0: potential new object start.
421 // R1: array element type.
422 // R2: array length as Smi.
423 // R3: array size.
424 // R7: potential next object start.
425 // R8: points to new space object.
426 __ LoadFromOffset(TMP, R8, Scavenger::end_offset());
427 __ CompareRegisters(R7, TMP);
428 __ b(&slow_case, CS); // Branch if unsigned higher or equal.
429
430 // Successfully allocated the object(s), now update top to point to
431 // next object start and initialize the object.
432 // R0: potential new object start.
433 // R3: array size.
434 // R7: potential next object start.
435 // R8: Points to new space object.
436 __ StoreToOffset(R7, R8, Scavenger::top_offset());
437 __ add(R0, R0, Operand(kHeapObjectTag));
438 __ UpdateAllocationStatsWithSize(kArrayCid, R3, R8, kNoPP);
439
440 // R0: new object start as a tagged pointer.
441 // R1: array element type.
442 // R2: array length as Smi.
443 // R3: array size.
444 // R7: new object end address.
445
446 // Store the type argument field.
447 __ StoreIntoObjectNoBarrier(
448 R0,
449 FieldAddress(R0, Array::type_arguments_offset()),
450 R1);
451
452 // Set the length field.
453 __ StoreIntoObjectNoBarrier(
454 R0,
455 FieldAddress(R0, Array::length_offset()),
456 R2);
457
458 // Calculate the size tag.
459 // R0: new object start as a tagged pointer.
460 // R2: array length as Smi.
461 // R3: array size.
462 // R7: new object end address.
463 const intptr_t shift = RawObject::kSizeTagBit - kObjectAlignmentLog2;
464 __ CompareImmediate(R3, RawObject::SizeTag::kMaxSizeTag, kNoPP);
465 // If no size tag overflow, shift R1 left, else set R1 to zero.
466 __ Lsl(TMP, R3, shift);
467 __ LoadImmediate(TMP2, 0, kNoPP);
468 __ csel(R1, TMP, R1, LS);
469 __ csel(R1, TMP2, R1, HI);
regis 2014/04/29 23:09:21 Can't you use ZR register here?
zra 2014/04/29 23:18:19 Done.
470
471 // Get the class index and insert it into the tags.
472 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(kArrayCid), kNoPP);
473 __ orr(R1, R1, Operand(TMP));
474 __ StoreFieldToOffset(R1, R0, Array::tags_offset());
475
476 // Initialize all array elements to raw_null.
477 // R0: new object start as a tagged pointer.
478 // R7: new object end address.
479 // R2: array length as Smi.
480 __ AddImmediate(R1, R0, Array::data_offset() - kHeapObjectTag, kNoPP);
481 // R1: iterator which initially points to the start of the variable
482 // data area to be initialized.
483 __ LoadObject(TMP, Object::null_object(), PP);
484 Label loop, done;
485 __ Bind(&loop);
486 // TODO(cshapiro): StoreIntoObjectNoBarrier
regis 2014/04/29 23:09:21 Mmm, any volunteer?
zra 2014/04/29 23:18:19 Maybe we can chat about this tomorrow.
487 __ CompareRegisters(R1, R7);
488 __ b(&done, CS);
489 __ str(TMP, Address(R1)); // Store if unsigned lower.
490 __ AddImmediate(R1, R1, kWordSize, kNoPP);
491 __ b(&loop); // Loop until R1 == R7.
492 __ Bind(&done);
493
494 // Done allocating and initializing the array.
495 // R0: new object.
496 // R2: array length as Smi (preserved for the caller.)
497 __ ret();
498 }
499
500 // Unable to allocate the array using the fast inline code, just call
501 // into the runtime.
502 __ Bind(&slow_case);
503 // Create a stub frame as we are pushing some objects on the stack before
504 // calling into the runtime.
505 __ EnterStubFrame();
506 __ LoadObject(TMP, Object::null_object(), PP);
507 // Setup space on stack for return value.
508 // Push array length as Smi and element type.
509 __ Push(TMP);
510 __ Push(R2);
511 __ Push(R1);
512 __ CallRuntime(kAllocateArrayRuntimeEntry, 2);
513 // Pop arguments; result is popped in IP.
514 __ Pop(R1);
515 __ Pop(R2);
516 __ Pop(TMP);
517 __ mov(R0, TMP);
518 __ LeaveStubFrame();
519 __ ret();
384 } 520 }
385 521
386 522
387 // Called when invoking Dart code from C++ (VM code). 523 // Called when invoking Dart code from C++ (VM code).
388 // Input parameters: 524 // Input parameters:
389 // LR : points to return address. 525 // LR : points to return address.
390 // R0 : entrypoint of the Dart function to call. 526 // R0 : entrypoint of the Dart function to call.
391 // R1 : arguments descriptor array. 527 // R1 : arguments descriptor array.
392 // R2 : arguments array. 528 // R2 : arguments array.
393 // R3 : new context containing the current isolate pointer. 529 // R3 : new context containing the current isolate pointer.
(...skipping 134 matching lines...) Expand 10 before | Expand all | Expand 10 after
528 } 664 }
529 665
530 // TODO(zra): Restore callee-saved fpu registers. 666 // TODO(zra): Restore callee-saved fpu registers.
531 667
532 // Restore the frame pointer and return. 668 // Restore the frame pointer and return.
533 __ LeaveFrame(); 669 __ LeaveFrame();
534 __ ret(); 670 __ ret();
535 } 671 }
536 672
537 673
674 // Called for inline allocation of contexts.
675 // Input:
676 // R1: number of context variables.
677 // Output:
678 // R0: new allocated RawContext object.
538 void StubCode::GenerateAllocateContextStub(Assembler* assembler) { 679 void StubCode::GenerateAllocateContextStub(Assembler* assembler) {
539 __ Stop("GenerateAllocateContextStub"); 680 if (FLAG_inline_alloc) {
681 const Class& context_class = Class::ZoneHandle(Object::context_class());
682 Label slow_case;
683 Heap* heap = Isolate::Current()->heap();
684 // First compute the rounded instance size.
685 // R1: number of context variables.
686 intptr_t fixed_size = sizeof(RawContext) + kObjectAlignment - 1;
687 __ LoadImmediate(R2, fixed_size, kNoPP);
688 __ add(R2, R2, Operand(R1, LSL, 3));
689 ASSERT(kSmiTagShift == 1);
690 __ andi(R2, R2, ~(kObjectAlignment - 1));
691
692 // Now allocate the object.
693 // R1: number of context variables.
694 // R2: object size.
695 __ LoadImmediate(R5, heap->TopAddress(), kNoPP);
696 __ ldr(R0, Address(R5));
697 __ add(R3, R2, Operand(R0));
698 // Check if the allocation fits into the remaining space.
699 // R0: potential new object.
700 // R1: number of context variables.
701 // R2: object size.
702 // R3: potential next object start.
703 __ LoadImmediate(TMP, heap->EndAddress(), kNoPP);
704 __ ldr(TMP, Address(TMP));
705 __ CompareRegisters(R3, TMP);
706 if (FLAG_use_slow_path) {
707 __ b(&slow_case);
708 } else {
709 __ b(&slow_case, CS); // Branch if unsigned higher or equal.
710 }
711
712 // Successfully allocated the object, now update top to point to
713 // next object start and initialize the object.
714 // R0: new object.
715 // R1: number of context variables.
716 // R2: object size.
717 // R3: next object start.
718 __ str(R3, Address(R5));
719 __ add(R0, R0, Operand(kHeapObjectTag));
720 __ UpdateAllocationStatsWithSize(context_class.id(), R2, R5, kNoPP);
721
722 // Calculate the size tag.
723 // R0: new object.
724 // R1: number of context variables.
725 // R2: object size.
726 const intptr_t shift = RawObject::kSizeTagBit - kObjectAlignmentLog2;
727 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag, kNoPP);
728 // If no size tag overflow, shift R2 left, else set R2 to zero.
729 __ Lsl(TMP, R2, shift);
730 __ LoadImmediate(TMP2, 0, kNoPP);
731 __ csel(R2, TMP, R2, LS);
732 __ csel(R2, TMP2, R2, HI);
regis 2014/04/29 23:09:21 ditto
zra 2014/04/29 23:18:19 Done. Thanks for spotting this.
733
734 // Get the class index and insert it into the tags.
735 // R2: size and bit tags.
736 __ LoadImmediate(
737 TMP, RawObject::ClassIdTag::encode(context_class.id()), kNoPP);
738 __ orr(R2, R2, Operand(TMP));
739 __ StoreFieldToOffset(R2, R0, Context::tags_offset());
740
741 // Setup up number of context variables field.
742 // R0: new object.
743 // R1: number of context variables as integer value (not object).
744 __ StoreFieldToOffset(R1, R0, Context::num_variables_offset());
745
746 // Setup isolate field.
747 // Load Isolate pointer from Context structure into R2.
748 // R0: new object.
749 // R1: number of context variables.
750 __ LoadFieldFromOffset(R2, CTX, Context::isolate_offset());
751 // R2: isolate, not an object.
752 __ StoreFieldToOffset(R2, R0, Context::isolate_offset());
753
754 // Setup the parent field.
755 // R0: new object.
756 // R1: number of context variables.
757 __ LoadObject(R2, Object::null_object(), PP);
758 __ StoreFieldToOffset(R2, R0, Context::parent_offset());
759
760 // Initialize the context variables.
761 // R0: new object.
762 // R1: number of context variables.
763 // R2: raw null.
764 Label loop, done;
765 __ AddImmediate(
766 R3, R0, Context::variable_offset(0) - kHeapObjectTag, kNoPP);
767 __ Bind(&loop);
768 __ subs(R1, R1, Operand(1));
769 __ b(&done, MI);
770 __ str(R2, Address(R3, R1, UXTX, Address::Scaled));
771 __ b(&loop, NE); // Loop if R1 not zero.
772 __ Bind(&done);
773
774 // Done allocating and initializing the context.
775 // R0: new object.
776 __ ret();
777
778 __ Bind(&slow_case);
779 }
780 // Create a stub frame as we are pushing some objects on the stack before
781 // calling into the runtime.
782 __ EnterStubFrame();
783 // Setup space on stack for return value.
784 __ LoadObject(R2, Object::null_object(), PP);
785 __ SmiTag(R1);
786 __ Push(R2);
787 __ Push(R1);
788 __ CallRuntime(kAllocateContextRuntimeEntry, 1); // Allocate context.
789 __ Drop(1); // Pop number of context variables argument.
790 __ Pop(R0); // Pop the new context object.
791 // R0: new object
792 // Restore the frame pointer.
793 __ LeaveStubFrame();
794 __ ret();
540 } 795 }
541 796
542 797
543 DECLARE_LEAF_RUNTIME_ENTRY(void, StoreBufferBlockProcess, Isolate* isolate); 798 DECLARE_LEAF_RUNTIME_ENTRY(void, StoreBufferBlockProcess, Isolate* isolate);
544 799
545 // Helper stub to implement Assembler::StoreIntoObject. 800 // Helper stub to implement Assembler::StoreIntoObject.
546 // Input parameters: 801 // Input parameters:
547 // R0: Address being stored 802 // R0: Address being stored
548 void StubCode::GenerateUpdateStoreBufferStub(Assembler* assembler) { 803 void StubCode::GenerateUpdateStoreBufferStub(Assembler* assembler) {
549 Label add_to_buffer; 804 Label add_to_buffer;
(...skipping 421 matching lines...) Expand 10 before | Expand all | Expand 10 after
971 1226
972 1227
// Two-checked-argument variant of the inline-cache check stub: bumps the
// usage counter (scratch register R6) and delegates to the generic n-arg
// IC lookup with n == 2.
void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, R6);
  GenerateNArgsCheckInlineCacheStub(
      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry);
}
978 1233
979 1234
// Three-checked-argument variant of the inline-cache check stub; mirrors
// the two-argument version above, delegating to the generic n-arg IC
// lookup with n == 3.
void StubCode::GenerateThreeArgsCheckInlineCacheStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, R6);
  GenerateNArgsCheckInlineCacheStub(
      assembler, 3, kInlineCacheMissHandlerThreeArgsRuntimeEntry);
}
983 1240
984 1241
// Placeholder: the optimized one-argument IC check stub is not yet
// implemented for arm64. Reaching it halts with a diagnostic message.
void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub(
    Assembler* assembler) {
  __ Stop("GenerateOneArgOptimizedCheckInlineCacheStub");
}
989 1246
990 1247
991 void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub( 1248 void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub(
(...skipping 257 matching lines...) Expand 10 before | Expand all | Expand 10 after
1249 1506
1250 1507
// Placeholder: the optimized identical-with-number-check stub is not yet
// implemented for arm64. Reaching it halts with a diagnostic message.
void StubCode::GenerateOptimizedIdenticalWithNumberCheckStub(
    Assembler* assembler) {
  __ Stop("GenerateOptimizedIdenticalWithNumberCheckStub");
}
1255 1512
1256 } // namespace dart 1513 } // namespace dart
1257 1514
1258 #endif // defined TARGET_ARCH_ARM64 1515 #endif // defined TARGET_ARCH_ARM64
OLDNEW
« no previous file with comments | « runtime/vm/simulator_arm64.cc ('k') | runtime/vm/unit_test.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698