Chromium Code Reviews

Side by Side Diff: runtime/vm/assembler_arm.cc

Issue 1213013002: Update Assembler::TryAllocate to support inline allocation tracing (Closed)
Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: Created 5 years, 5 months ago
 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.
 
 #include "vm/globals.h"  // NOLINT
 #if defined(TARGET_ARCH_ARM)
 
 #include "vm/assembler.h"
 #include "vm/cpu.h"
 #include "vm/longjump.h"
(...skipping 3325 matching lines...)
 
 
 void Assembler::LeaveStubFrame() {
   LeaveFrame((1 << PP) | (1 << FP) | (1 << LR));
   // Adjust SP for null PC pushed in EnterStubFrame.
   AddImmediate(SP, kWordSize);
 }
 
 
 void Assembler::LoadAllocationStatsAddress(Register dest,
-                                           intptr_t cid,
-                                           Heap::Space space) {
+                                           intptr_t cid) {
   ASSERT(dest != kNoRegister);
   ASSERT(dest != TMP);
   ASSERT(cid > 0);
   Isolate* isolate = Isolate::Current();
   ClassTable* class_table = isolate->class_table();
   if (cid < kNumPredefinedCids) {
     const uword class_heap_stats_table_address =
         class_table->PredefinedClassHeapStatsTableAddress();
     const uword class_offset = cid * sizeof(ClassHeapStats);  // NOLINT
     LoadImmediate(dest, class_heap_stats_table_address + class_offset);
   } else {
     const uword class_offset = cid * sizeof(ClassHeapStats);  // NOLINT
     LoadImmediate(dest, class_table->ClassStatsTableAddress());
     ldr(dest, Address(dest, 0));
     AddImmediate(dest, class_offset);
   }
 }
 
 
+void Assembler::MaybeTraceAllocation(intptr_t cid,
+                                     Register temp_reg,
+                                     Label* trace) {
+  LoadAllocationStatsAddress(temp_reg, cid);
+  const uword state_offset = ClassHeapStats::state_offset();
+  const Address& state_address = Address(temp_reg, state_offset);
+  ldr(temp_reg, state_address);
+  tst(temp_reg, Operand(ClassHeapStats::TraceAllocationMask()));
+  b(trace, NE);
+}
+
+
 void Assembler::IncrementAllocationStats(Register stats_addr_reg,
                                          intptr_t cid,
                                          Heap::Space space) {
   ASSERT(stats_addr_reg != kNoRegister);
   ASSERT(stats_addr_reg != TMP);
   ASSERT(cid > 0);
   const uword count_field_offset = (space == Heap::kNew) ?
       ClassHeapStats::allocated_since_gc_new_space_offset() :
       ClassHeapStats::allocated_since_gc_old_space_offset();
   const Address& count_address = Address(stats_addr_reg, count_field_offset);
(...skipping 30 matching lines...)
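Taken together, LoadAllocationStatsAddress and the new MaybeTraceAllocation reduce the per-class tracing check to one address computation, one load, one bit test, and one conditional branch. The self-contained C++ sketch below models that logic with stand-in types and a stand-in mask value (the real ClassHeapStats layout and TraceAllocationMask() live in the VM headers); it only illustrates the check that the emitted ARM code performs and is not part of the patch.

    // Stand-alone sketch of the tracing check; all names and values here are
    // simplified stand-ins for the VM's ClassHeapStats machinery.
    #include <cstdint>
    #include <cstdio>

    struct ClassHeapStatsStub {       // stand-in for ClassHeapStats
      uintptr_t state;                // word tested by MaybeTraceAllocation
      uintptr_t allocated_since_gc;   // counter bumped by IncrementAllocationStats
    };

    constexpr uintptr_t kTraceAllocationMask = 1;  // stand-in for TraceAllocationMask()

    // Mirrors LoadAllocationStatsAddress: the stats entry for a class id is the
    // table base plus cid * sizeof(entry).
    ClassHeapStatsStub* StatsFor(ClassHeapStatsStub* table, intptr_t cid) {
      return table + cid;
    }

    // Mirrors MaybeTraceAllocation: if the trace bit is set for this class, the
    // caller should take the slow path (the real code branches to `trace`).
    bool ShouldTraceAllocation(ClassHeapStatsStub* table, intptr_t cid) {
      return (StatsFor(table, cid)->state & kTraceAllocationMask) != 0;
    }

    int main() {
      ClassHeapStatsStub table[8] = {};
      table[3].state |= kTraceAllocationMask;  // enable tracing for cid 3 only
      std::printf("cid 2 traced? %d\n", ShouldTraceAllocation(table, 2));  // 0
      std::printf("cid 3 traced? %d\n", ShouldTraceAllocation(table, 3));  // 1
      return 0;
    }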
 void Assembler::TryAllocate(const Class& cls,
                             Label* failure,
                             Register instance_reg,
                             Register temp_reg) {
   ASSERT(failure != NULL);
   if (FLAG_inline_alloc) {
     ASSERT(instance_reg != temp_reg);
     ASSERT(temp_reg != IP);
     const intptr_t instance_size = cls.instance_size();
     ASSERT(instance_size != 0);
+    // If this allocation is traced, program will jump to failure path
+    // (i.e. the allocation stub) which will allocate the object and trace the
+    // allocation call site.
+    MaybeTraceAllocation(cls.id(), temp_reg, failure);
     Heap* heap = Isolate::Current()->heap();
     Heap::Space space = heap->SpaceForAllocation(cls.id());
     const uword top_address = heap->TopAddress(space);
     LoadImmediate(temp_reg, top_address);
     ldr(instance_reg, Address(temp_reg));
     // TODO(koda): Protect against unsigned overflow here.
     AddImmediateSetFlags(instance_reg, instance_reg, instance_size);
 
     // instance_reg: potential next object start.
     const uword end_address = heap->EndAddress(space);
     ASSERT(top_address < end_address);
     // Could use ldm to load (top, end), but no benefit seen experimentally.
     ldr(IP, Address(temp_reg, end_address - top_address));
     cmp(IP, Operand(instance_reg));
     // fail if heap end unsigned less than or equal to instance_reg.
     b(failure, LS);
 
     // Successfully allocated the object, now update top to point to
     // next object start and store the class in the class field of object.
     str(instance_reg, Address(temp_reg));
 
-    LoadAllocationStatsAddress(temp_reg, cls.id(), space);
+    LoadAllocationStatsAddress(temp_reg, cls.id());
 
     ASSERT(instance_size >= kHeapObjectTag);
     AddImmediate(instance_reg, -instance_size + kHeapObjectTag);
 
     uword tags = 0;
     tags = RawObject::SizeTag::update(instance_size, tags);
     ASSERT(cls.id() != kIllegalCid);
     tags = RawObject::ClassIdTag::update(cls.id(), tags);
     LoadImmediate(IP, tags);
     str(IP, FieldAddress(instance_reg, Object::tags_offset()));
(...skipping 22 matching lines...)
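The hunk above therefore turns the inline allocation into: trace check, bump of the top pointer, limit check against the end of the space, publication of the new top, and finally tagging plus the statistics update. The toy C++ model below summarizes that control flow under simplified assumptions (a flat heap, no object tagging, a single global trace flag standing in for the per-class bit); only the ordering of the steps mirrors the patched TryAllocate.

    #include <cstdint>
    #include <cstdio>

    // Toy model of the patched fast path; types, fields, and the trace flag are
    // simplified stand-ins. Only the order of operations follows TryAllocate.
    struct ToyHeap {
      uintptr_t top;   // next free address (heap->TopAddress)
      uintptr_t end;   // end of the space  (heap->EndAddress)
    };

    bool trace_allocations = false;  // stands in for the per-class trace bit

    // Returns 0 to mean "take the failure/slow path", otherwise the object start.
    uintptr_t TryAllocateFast(ToyHeap* heap, uintptr_t instance_size) {
      // New in this patch: traced classes always go to the allocation stub.
      if (trace_allocations) return 0;
      uintptr_t new_top = heap->top + instance_size;
      // Mirrors cmp(IP, instance_reg); b(failure, LS): fail if end <= new top.
      if (heap->end <= new_top) return 0;
      uintptr_t object = heap->top;
      heap->top = new_top;  // publish the new top, as str(instance_reg, ...) does
      // The real code then writes the tags word and updates the allocation
      // statistics via the stats address loaded earlier.
      return object;
    }

    int main() {
      ToyHeap heap = {0x1000, 0x2000};
      std::printf("first object at %#lx\n",
                  (unsigned long)TryAllocateFast(&heap, 64));
      trace_allocations = true;
      std::printf("traced allocation -> slow path (%#lx)\n",
                  (unsigned long)TryAllocateFast(&heap, 64));
      return 0;
    }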
     b(failure, CS);  // Branch if unsigned overflow.
 
     // Check if the allocation fits into the remaining space.
     // instance: potential new object start.
     // end_address: potential next object start.
     LoadImmediate(temp2, heap->EndAddress(space));
     ldr(temp2, Address(temp2, 0));
     cmp(end_address, Operand(temp2));
     b(failure, CS);
 
-    LoadAllocationStatsAddress(temp2, cid, space);
+    LoadAllocationStatsAddress(temp2, cid);
 
     // Successfully allocated the object(s), now update top to point to
     // next object start and initialize the object.
     str(end_address, Address(temp1, 0));
     add(instance, instance, Operand(kHeapObjectTag));
 
     // Initialize the tags.
     // instance: new object start as a tagged pointer.
     uword tags = 0;
     tags = RawObject::ClassIdTag::update(cid, tags);
(...skipping 117 matching lines...)
 
 
 const char* Assembler::FpuRegisterName(FpuRegister reg) {
   ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters));
   return fpu_reg_names[reg];
 }
 
 }  // namespace dart
 
 #endif  // defined TARGET_ARCH_ARM