Chromium Code Reviews

Unified Diff: runtime/vm/assembler_x64.cc

Issue 1263513002: VM: Load allocation-top and -end via Thread. (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: arm, arm64 and mips Created 5 years, 4 months ago
 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.

 #include "vm/globals.h"  // NOLINT
 #if defined(TARGET_ARCH_X64)

 #include "vm/assembler.h"
 #include "vm/cpu.h"
 #include "vm/heap.h"
(...skipping 3504 matching lines...)
   Register temp_reg = TMP;
   intptr_t size_offset = ClassTable::SizeOffsetFor(cid, space == Heap::kNew);
   addq(Address(temp_reg, size_offset), Immediate(size_in_bytes));
 }


 void Assembler::TryAllocate(const Class& cls,
                             Label* failure,
                             bool near_jump,
                             Register instance_reg,
-                            Register pp) {
+                            Register pp,
+                            Register temp) {
   ASSERT(failure != NULL);
   if (FLAG_inline_alloc) {
     // If this allocation is traced, program will jump to failure path
     // (i.e. the allocation stub) which will allocate the object and trace the
     // allocation call site.
-    MaybeTraceAllocation(cls.id(), failure, near_jump);
-    Heap* heap = Isolate::Current()->heap();
+    MaybeTraceAllocation(cls.id(), failure, near_jump,
+                         /* inline_isolate = */ false);
     const intptr_t instance_size = cls.instance_size();
-    Heap::Space space = heap->SpaceForAllocation(cls.id());
-    LoadImmediate(TMP, Immediate(heap->TopAddress(space)), pp);
-    movq(instance_reg, Address(TMP, 0));
+    Heap::Space space = Heap::SpaceForAllocation(cls.id());
+    movq(temp, Address(THR, Thread::heap_offset()));
+    movq(instance_reg, Address(temp, Heap::TopOffset(space)));
     AddImmediate(instance_reg, Immediate(instance_size), pp);
     // instance_reg: potential next object start.
-    LoadImmediate(TMP, Immediate(heap->EndAddress(space)), pp);
-    cmpq(instance_reg, Address(TMP, 0));
+    cmpq(instance_reg, Address(temp, Heap::EndOffset(space)));
     j(ABOVE_EQUAL, failure, near_jump);
     // Successfully allocated the object, now update top to point to
     // next object start and store the class in the class field of object.
-    LoadImmediate(TMP, Immediate(heap->TopAddress(space)), pp);
-    movq(Address(TMP, 0), instance_reg);
-    UpdateAllocationStats(cls.id(), space);
+    movq(Address(temp, Heap::TopOffset(space)), instance_reg);
+    UpdateAllocationStats(cls.id(), space, /* inline_isolate = */ false);
     ASSERT(instance_size >= kHeapObjectTag);
     AddImmediate(instance_reg, Immediate(kHeapObjectTag - instance_size), pp);
     uword tags = 0;
     tags = RawObject::SizeTag::update(instance_size, tags);
     ASSERT(cls.id() != kIllegalCid);
     tags = RawObject::ClassIdTag::update(cls.id(), tags);
     MoveImmediate(FieldAddress(instance_reg, Object::tags_offset()),
                   Immediate(tags), pp);
   } else {
     jmp(failure);
   }
 }
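The change above is easier to see in a minimal C++ model (the Mock* types below are illustrative stand-ins, not the VM's real classes): the old code baked heap->TopAddress(space) and heap->EndAddress(space) into the instruction stream as absolute immediates obtained from Isolate::Current() at code-generation time, whereas the new code reaches the same top/end words with two dependent loads, the Heap* stored at Thread::heap_offset() relative to THR and then a fixed Heap::TopOffset/Heap::EndOffset displacement.

// Minimal sketch of the addressing change; MockHeap/MockThread are
// hypothetical stand-ins for the VM's Heap and Thread.
#include <cstdint>

struct MockHeap {
  uintptr_t top;   // the word reached via Heap::TopOffset(space)
  uintptr_t end;   // the word reached via Heap::EndOffset(space)
};

struct MockThread {
  MockHeap* heap;  // the word reached via Thread::heap_offset() from THR
};

// Old pattern: the top word's absolute address is an immediate fixed at
// code-generation time.
inline uintptr_t LoadTopOld(uintptr_t* top_address_immediate) {
  return *top_address_immediate;  // LoadImmediate(TMP, ...); movq(reg, Address(TMP, 0))
}

// New pattern: two dependent loads relative to the current thread.
inline uintptr_t LoadTopNew(const MockThread* thr) {
  return thr->heap->top;  // movq(temp, [THR + heap_offset]); movq(reg, [temp + TopOffset])
}

In particular, the assembler no longer calls Isolate::Current(), and the emitted instructions contain only offsets rather than the address at which a specific isolate's heap happens to live.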


 void Assembler::TryAllocateArray(intptr_t cid,
                                  intptr_t instance_size,
                                  Label* failure,
                                  bool near_jump,
                                  Register instance,
-                                 Register end_address) {
+                                 Register end_address,
+                                 Register temp) {
   ASSERT(failure != NULL);
   if (FLAG_inline_alloc) {
     // If this allocation is traced, program will jump to failure path
     // (i.e. the allocation stub) which will allocate the object and trace the
     // allocation call site.
-    MaybeTraceAllocation(cid, failure, near_jump);
-    Isolate* isolate = Isolate::Current();
-    Heap* heap = isolate->heap();
-    Heap::Space space = heap->SpaceForAllocation(cid);
-    movq(instance, Immediate(heap->TopAddress(space)));
-    movq(instance, Address(instance, 0));
-    movq(end_address, RAX);
+    MaybeTraceAllocation(cid, failure, near_jump, /* inline_isolate = */ false);
+    Heap::Space space = Heap::SpaceForAllocation(cid);
+    movq(temp, Address(THR, Thread::heap_offset()));
+    movq(instance, Address(temp, Heap::TopOffset(space)));
+    movq(end_address, instance);

     addq(end_address, Immediate(instance_size));
     j(CARRY, failure);

     // Check if the allocation fits into the remaining space.
     // instance: potential new object start.
     // end_address: potential next object start.
-    movq(TMP, Immediate(heap->EndAddress(space)));
-    cmpq(end_address, Address(TMP, 0));
+    cmpq(end_address, Address(temp, Heap::EndOffset(space)));
     j(ABOVE_EQUAL, failure);

     // Successfully allocated the object(s), now update top to point to
     // next object start and initialize the object.
-    movq(TMP, Immediate(heap->TopAddress(space)));
-    movq(Address(TMP, 0), end_address);
+    movq(Address(temp, Heap::TopOffset(space)), end_address);
     addq(instance, Immediate(kHeapObjectTag));
-    UpdateAllocationStatsWithSize(cid, instance_size, space);
+    UpdateAllocationStatsWithSize(cid, instance_size, space,
+                                  /* inline_isolate = */ false);

     // Initialize the tags.
     // instance: new object start as a tagged pointer.
     uword tags = 0;
     tags = RawObject::ClassIdTag::update(cid, tags);
     tags = RawObject::SizeTag::update(instance_size, tags);
     movq(FieldAddress(instance, Array::tags_offset()), Immediate(tags));
   } else {
     jmp(failure);
   }
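The fast path that the new TryAllocateArray emits corresponds roughly to the following self-contained sketch (hypothetical names, not the VM's API); the two early returns stand in for the j(CARRY, failure) and j(ABOVE_EQUAL, failure) jumps, and tagging the result is left to the emitted code that follows.

// Conceptual C++ model of the emitted bump-pointer fast path; the
// AllocationRegion fields stand in for the heap's top/end words.
#include <cstdint>

struct AllocationRegion {
  uintptr_t top;   // Address(temp, Heap::TopOffset(space))
  uintptr_t end;   // Address(temp, Heap::EndOffset(space))
};

// Returns the untagged start of the new object, or 0 to mean "jump to failure".
inline uintptr_t TryBumpAllocate(AllocationRegion* region, uintptr_t size) {
  uintptr_t instance = region->top;          // movq(instance, [temp + TopOffset])
  uintptr_t end_address = instance + size;   // addq(end_address, size)
  if (end_address < instance) return 0;      // j(CARRY, failure): addition wrapped
  if (end_address >= region->end) return 0;  // j(ABOVE_EQUAL, failure): no room left
  region->top = end_address;                 // movq([temp + TopOffset], end_address)
  return instance;                           // caller adds kHeapObjectTag and tags
}

The carry check guards the case where instance_size is large enough to wrap the 64-bit addition, which the end comparison alone would not catch.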
(...skipping 325 matching lines...)


 const char* Assembler::FpuRegisterName(FpuRegister reg) {
   ASSERT((0 <= reg) && (reg < kNumberOfXmmRegisters));
   return xmm_reg_names[reg];
 }

 }  // namespace dart

 #endif  // defined TARGET_ARCH_X64
