Chromium Code Reviews

Diff: src/heap.cc

Issue 40063002: Bookkeeping for allocation site pretenuring (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: REBASE. Created 7 years ago

 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 69 matching lines...)
       // Will be 4 * reserved_semispace_size_ to ensure that young
       // generation can be aligned to its size.
       maximum_committed_(0),
       survived_since_last_expansion_(0),
       sweep_generation_(0),
       always_allocate_scope_depth_(0),
       linear_allocation_scope_depth_(0),
       contexts_disposed_(0),
       global_ic_age_(0),
       flush_monomorphic_ics_(false),
-      allocation_mementos_found_(0),
       scan_on_scavenge_pages_(0),
       new_space_(this),
       old_pointer_space_(NULL),
       old_data_space_(NULL),
       code_space_(NULL),
       map_space_(NULL),
       cell_space_(NULL),
       property_cell_space_(NULL),
       lo_space_(NULL),
       gc_state_(NOT_IN_GC),
(...skipping 398 matching lines...)
   PagedSpaces spaces(this);
   for (PagedSpace* space = spaces.next();
        space != NULL;
        space = spaces.next()) {
     space->RepairFreeListsAfterBoot();
   }
 }


 void Heap::GarbageCollectionEpilogue() {
+  if (FLAG_allocation_site_pretenuring) {
+    int tenure_decisions = 0;
+    int dont_tenure_decisions = 0;
+    int allocation_mementos_found = 0;
+
+    Object* cur = allocation_sites_list();
+    while (cur->IsAllocationSite()) {
+      AllocationSite* casted = AllocationSite::cast(cur);
+      allocation_mementos_found += casted->memento_found_count()->value();
+      if (casted->DigestPretenuringFeedback()) {
+        if (casted->GetPretenureMode() == TENURED) {
+          tenure_decisions++;
+        } else {
+          dont_tenure_decisions++;
+        }
+      }
+      cur = casted->weak_next();
+    }
+
+    // TODO(mvstanton): Pretenure decisions are only made once for an allocation
+    // site. Find a sane way to decide about revisiting the decision later.
+
+    if (FLAG_trace_track_allocation_sites &&
+        (allocation_mementos_found > 0 ||
+         tenure_decisions > 0 ||
+         dont_tenure_decisions > 0)) {
+      PrintF("GC: (#mementos, #tenure decisions, #donttenure decisions) "
+             "(%d, %d, %d)\n",
+             allocation_mementos_found,
+             tenure_decisions,
+             dont_tenure_decisions);
+    }
+  }
+
   store_buffer()->GCEpilogue();

   // In release mode, we only zap the from space under heap verification.
   if (Heap::ShouldZapGarbage()) {
     ZapFromSpace();
   }

 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     Verify();
(...skipping 867 matching lines...)
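
A note on the GarbageCollectionEpilogue() hunk above: instead of one global counter that is reset every scavenge, each AllocationSite now carries its own memento count, and the epilogue walks the weak allocation_sites_list() once per GC, tallies the counts, and gives every site one chance to turn its feedback into a tenuring decision. The sketch below is a simplified, self-contained illustration of that shape; Site, GarbageCollectionEpilogueSketch, and the 100-memento threshold are assumptions made for illustration, and the real heuristic lives in AllocationSite::DigestPretenuringFeedback, which is not part of this file.

    // Simplified, self-contained sketch of the per-GC bookkeeping above.
    #include <cstdio>

    struct Site {
      int memento_found_count = 0;  // mementos observed for this site
      bool decision_made = false;   // a site currently decides only once (see TODO above)
      bool tenured = false;         // the decision, once made
      Site* weak_next = nullptr;    // weak list threaded through the sites

      // Placeholder for AllocationSite::DigestPretenuringFeedback().
      bool DigestPretenuringFeedback() {
        if (decision_made) return false;
        decision_made = true;
        tenured = memento_found_count > 100;  // assumed threshold, illustration only
        return true;
      }
    };

    void GarbageCollectionEpilogueSketch(Site* site_list_head) {
      int tenure_decisions = 0;
      int dont_tenure_decisions = 0;
      int mementos_found = 0;
      // V8 stops when the list element is no longer an AllocationSite; the
      // sketch models that sentinel with nullptr.
      for (Site* cur = site_list_head; cur != nullptr; cur = cur->weak_next) {
        mementos_found += cur->memento_found_count;
        if (cur->DigestPretenuringFeedback()) {
          if (cur->tenured) tenure_decisions++; else dont_tenure_decisions++;
        }
      }
      std::printf("GC: (#mementos, #tenure decisions, #donttenure decisions) (%d, %d, %d)\n",
                  mementos_found, tenure_decisions, dont_tenure_decisions);
    }

Assuming the usual V8 mapping from FLAG_foo_bar to --foo-bar, the feature would be toggled with --allocation-site-pretenuring and the summary line above enabled with --trace-track-allocation-sites.
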
   }

  private:
   Heap* heap_;
 };


 void Heap::Scavenge() {
   RelocationLock relocation_lock(this);

-  allocation_mementos_found_ = 0;
-
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
 #endif

   gc_state_ = SCAVENGE;

   // Implements Cheney's copying algorithm
   LOG(isolate_, ResourceEvent("scavenge", "begin"));

   // Clear descriptor cache.
(...skipping 127 matching lines...)

   // Update how much has survived scavenge.
   IncrementYoungSurvivorsCounter(static_cast<int>(
       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));

   LOG(isolate_, ResourceEvent("scavenge", "end"));

   gc_state_ = NOT_IN_GC;

   scavenges_since_last_idle_round_++;
-
-  if (FLAG_trace_track_allocation_sites && allocation_mementos_found_ > 0) {
-    PrintF("AllocationMementos found during scavenge = %d\n",
-           allocation_mementos_found_);
-  }
 }


 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                 Object** p) {
   MapWord first_word = HeapObject::cast(*p)->map_word();

   if (!first_word.IsForwardingAddress()) {
     // Unreachable external string can be finalized.
     heap->FinalizeExternalString(String::cast(*p));
(...skipping 2792 matching lines...)

 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     code->Verify();
   }
 #endif
   return new_code;
 }


+void Heap::InitializeAllocationMemento(AllocationMemento* memento,
+                                       AllocationSite* allocation_site) {
+  memento->set_map_no_write_barrier(allocation_memento_map());
+  ASSERT(allocation_site->map() == allocation_site_map());
+  memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
+  if (FLAG_allocation_site_pretenuring) {
+    allocation_site->IncrementMementoCreateCount();
+  }
+}
+
+
 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
     Handle<AllocationSite> allocation_site) {
   ASSERT(gc_state_ == NOT_IN_GC);
   ASSERT(map->instance_type() != MAP_TYPE);
   // If allocation failures are disallowed, we may allocate in a different
   // space when new space is full and the object is not a large object.
   AllocationSpace retry_space =
       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
   int size = map->instance_size() + AllocationMemento::kSize;
   Object* result;
   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
   if (!maybe_result->ToObject(&result)) return maybe_result;
   // No need for write barrier since object is white and map is in old space.
   HeapObject::cast(result)->set_map_no_write_barrier(map);
   AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
       reinterpret_cast<Address>(result) + map->instance_size());
-  alloc_memento->set_map_no_write_barrier(allocation_memento_map());
-  ASSERT(allocation_site->map() == allocation_site_map());
-  alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
+  InitializeAllocationMemento(alloc_memento, *allocation_site);
   return result;
 }


 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
   ASSERT(gc_state_ == NOT_IN_GC);
   ASSERT(map->instance_type() != MAP_TYPE);
   // If allocation failures are disallowed, we may allocate in a different
   // space when new space is full and the object is not a large object.
   AllocationSpace retry_space =
(...skipping 412 matching lines...)
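
For context on AllocateWithAllocationSite() above: the raw allocation covers map->instance_size() plus AllocationMemento::kSize, so the memento sits immediately behind the object in the same allocation, and the new InitializeAllocationMemento() helper both links it to the site and, when pretenuring is enabled, bumps the site's create count in one place. A minimal layout sketch follows, with hypothetical sizes standing in for the real instance and memento sizes:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kInstanceSize = 32;  // stand-in for map->instance_size()
      const int kMementoSize = 16;   // stand-in for AllocationMemento::kSize

      // One raw allocation holds the object followed by its memento.
      uint8_t block[kInstanceSize + kMementoSize] = {};
      uint8_t* object = block;                   // receives the object's map
      uint8_t* memento = block + kInstanceSize;  // receives the memento map + site

      std::printf("object at +0, memento at +%d, total %d bytes\n",
                  kInstanceSize, kInstanceSize + kMementoSize);
      (void)object;
      (void)memento;
      return 0;
    }

Factoring the memento setup into a helper also lets the CopyJSObject() path further down reuse it instead of repeating the map store, the ASSERT, and the site pointer write.
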
 MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
   // Never used to copy functions. If functions need to be copied we
   // have to be careful to clear the literals array.
   SLOW_ASSERT(!source->IsJSFunction());

   // Make the clone.
   Map* map = source->map();
   int object_size = map->instance_size();
   Object* clone;

-  ASSERT(site == NULL || (AllocationSite::CanTrack(map->instance_type()) &&
-                          map->instance_type() == JS_ARRAY_TYPE));
+  ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type()));

   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;

   // If we're forced to always allocate, we use the general allocation
   // functions which may leave us with an object in old space.
   if (always_allocate()) {
     { MaybeObject* maybe_clone =
           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
     }
(...skipping 18 matching lines...)
     SLOW_ASSERT(InNewSpace(clone));
     // Since we know the clone is allocated in new space, we can copy
     // the contents without worrying about updating the write barrier.
     CopyBlock(HeapObject::cast(clone)->address(),
               source->address(),
               object_size);

     if (site != NULL) {
       AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
           reinterpret_cast<Address>(clone) + object_size);
-      alloc_memento->set_map_no_write_barrier(allocation_memento_map());
-      ASSERT(site->map() == allocation_site_map());
-      alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
+      InitializeAllocationMemento(alloc_memento, site);
       HeapProfiler* profiler = isolate()->heap_profiler();
       if (profiler->is_tracking_allocations()) {
         profiler->UpdateObjectSizeEvent(HeapObject::cast(clone)->address(),
                                         object_size);
         profiler->NewObjectEvent(alloc_memento->address(),
                                  AllocationMemento::kSize);
       }
     }
   }

(...skipping 3097 matching lines...)
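
One detail of the CopyJSObject() hunk above is easy to miss: CopyBlock() copies only object_size bytes from the source, so the clone's memento is never copied from the source; it is freshly written behind the clone via InitializeAllocationMemento(), and the heap profiler is then told about both the resized clone and the new memento. A small sketch of that ordering follows, using hypothetical names (MementoSketch, CloneWithMementoSketch) rather than the V8 types:

    #include <cstdint>
    #include <cstring>
    #include <new>

    struct MementoSketch { const void* allocation_site; };  // stand-in for AllocationMemento

    void CloneWithMementoSketch(const uint8_t* source, uint8_t* clone,
                                int object_size, const void* site) {
      // Like CopyBlock(clone, source, object_size): the memento region behind
      // the source, if any, is not part of object_size and is not copied.
      std::memcpy(clone, source, static_cast<size_t>(object_size));

      // Like InitializeAllocationMemento(clone + object_size, site): a fresh
      // memento pointing at the given site is written behind the clone.
      new (clone + object_size) MementoSketch{site};
    }
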
       static_cast<int>(object_sizes_last_time_[index]));
   CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
 #undef ADJUST_LAST_TIME_OBJECT_COUNT

   OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
   OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
   ClearObjectStats();
 }

 } }  // namespace v8::internal