Chromium Code Reviews

Side by Side Diff: src/heap.cc

Issue 40063002: Bookkeeping for allocation site pretenuring (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Addressing comments. Created 7 years ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 69 matching lines...)
80 // Will be 4 * reserved_semispace_size_ to ensure that young 80 // Will be 4 * reserved_semispace_size_ to ensure that young
81 // generation can be aligned to its size. 81 // generation can be aligned to its size.
82 maximum_committed_(0), 82 maximum_committed_(0),
83 survived_since_last_expansion_(0), 83 survived_since_last_expansion_(0),
84 sweep_generation_(0), 84 sweep_generation_(0),
85 always_allocate_scope_depth_(0), 85 always_allocate_scope_depth_(0),
86 linear_allocation_scope_depth_(0), 86 linear_allocation_scope_depth_(0),
87 contexts_disposed_(0), 87 contexts_disposed_(0),
88 global_ic_age_(0), 88 global_ic_age_(0),
89 flush_monomorphic_ics_(false), 89 flush_monomorphic_ics_(false),
90 allocation_mementos_found_(0),
91 scan_on_scavenge_pages_(0), 90 scan_on_scavenge_pages_(0),
92 new_space_(this), 91 new_space_(this),
93 old_pointer_space_(NULL), 92 old_pointer_space_(NULL),
94 old_data_space_(NULL), 93 old_data_space_(NULL),
95 code_space_(NULL), 94 code_space_(NULL),
96 map_space_(NULL), 95 map_space_(NULL),
97 cell_space_(NULL), 96 cell_space_(NULL),
98 property_cell_space_(NULL), 97 property_cell_space_(NULL),
99 lo_space_(NULL), 98 lo_space_(NULL),
100 gc_state_(NOT_IN_GC), 99 gc_state_(NOT_IN_GC),
(...skipping 398 matching lines...)
499 PagedSpaces spaces(this); 498 PagedSpaces spaces(this);
500 for (PagedSpace* space = spaces.next(); 499 for (PagedSpace* space = spaces.next();
501 space != NULL; 500 space != NULL;
502 space = spaces.next()) { 501 space = spaces.next()) {
503 space->RepairFreeListsAfterBoot(); 502 space->RepairFreeListsAfterBoot();
504 } 503 }
505 } 504 }
506 505
507 506
508 void Heap::GarbageCollectionEpilogue() { 507 void Heap::GarbageCollectionEpilogue() {
508 if (FLAG_allocation_site_pretenuring) {
509 int tenure_decisions = 0;
510 int dont_tenure_decisions = 0;
511 int allocation_mementos_found = 0;
512
513 Object* cur = allocation_sites_list();
514 while (cur->IsAllocationSite()) {
515 AllocationSite* casted = AllocationSite::cast(cur);
516 allocation_mementos_found += casted->memento_found_count()->value();
517 if (casted->DigestPretenuringFeedback()) {
518 if (casted->GetPretenureMode() == TENURED) {
519 tenure_decisions++;
520 } else {
521 dont_tenure_decisions++;
522 }
523 }
524 cur = casted->weak_next();
525 }
526
527 // TODO(mvstanton): Pretenure decisions are only made once for an allocation
528 // site. Find a sane way to decide about revisiting the decision later.
529
530 if (FLAG_trace_track_allocation_sites &&
531 (allocation_mementos_found > 0 ||
532 tenure_decisions > 0 ||
533 dont_tenure_decisions > 0)) {
534 PrintF("GC: (#mementos, #tenure decisions, #donttenure decisions) "
535 "(%d, %d, %d)\n",
536 allocation_mementos_found,
537 tenure_decisions,
538 dont_tenure_decisions);
539 }
540 }
541
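
Note: the new epilogue pass walks the weak list of allocation sites (head at allocation_sites_list(), chained through weak_next(), and terminated by a non-site sentinel rather than NULL, which is why the loop tests cur->IsAllocationSite()), sums the mementos found since the last GC, and tallies how many sites reached a tenuring decision this cycle. A self-contained sketch of that traversal with stand-in types; Node and its fields are illustrative, and DigestPretenuringFeedback()/GetPretenureMode() are modeled as plain fields:

#include <cstdio>

// Stand-in for AllocationSite: only the bookkeeping fields read by the
// epilogue pass are modeled here.
struct Node {
  bool is_site;             // cur->IsAllocationSite()
  int memento_found_count;  // casted->memento_found_count()->value()
  bool made_decision;       // casted->DigestPretenuringFeedback()
  bool tenured;             // casted->GetPretenureMode() == TENURED
  Node* weak_next;          // casted->weak_next()
};

void DigestSites(Node* head) {
  int mementos = 0, tenure = 0, dont_tenure = 0;
  for (Node* cur = head; cur != nullptr && cur->is_site;
       cur = cur->weak_next) {
    mementos += cur->memento_found_count;
    if (cur->made_decision) (cur->tenured ? tenure : dont_tenure)++;
  }
  if (mementos > 0 || tenure > 0 || dont_tenure > 0) {
    std::printf("GC: (#mementos, #tenure decisions, #donttenure decisions) "
                "(%d, %d, %d)\n", mementos, tenure, dont_tenure);
  }
}
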
509 store_buffer()->GCEpilogue(); 542 store_buffer()->GCEpilogue();
510 543
511 // In release mode, we only zap the from space under heap verification. 544 // In release mode, we only zap the from space under heap verification.
512 if (Heap::ShouldZapGarbage()) { 545 if (Heap::ShouldZapGarbage()) {
513 ZapFromSpace(); 546 ZapFromSpace();
514 } 547 }
515 548
516 #ifdef VERIFY_HEAP 549 #ifdef VERIFY_HEAP
517 if (FLAG_verify_heap) { 550 if (FLAG_verify_heap) {
518 Verify(); 551 Verify();
(...skipping 867 matching lines...)
1386 } 1419 }
1387 1420
1388 private: 1421 private:
1389 Heap* heap_; 1422 Heap* heap_;
1390 }; 1423 };
1391 1424
1392 1425
1393 void Heap::Scavenge() { 1426 void Heap::Scavenge() {
1394 RelocationLock relocation_lock(this); 1427 RelocationLock relocation_lock(this);
1395 1428
1396 allocation_mementos_found_ = 0;
1397
1398 #ifdef VERIFY_HEAP 1429 #ifdef VERIFY_HEAP
1399 if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this); 1430 if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1400 #endif 1431 #endif
1401 1432
1402 gc_state_ = SCAVENGE; 1433 gc_state_ = SCAVENGE;
1403 1434
1404 // Implements Cheney's copying algorithm 1435 // Implements Cheney's copying algorithm
1405 LOG(isolate_, ResourceEvent("scavenge", "begin")); 1436 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1406 1437
1407 // Clear descriptor cache. 1438 // Clear descriptor cache.
(...skipping 127 matching lines...)
1535 1566
1536 // Update how much has survived scavenge. 1567 // Update how much has survived scavenge.
1537 IncrementYoungSurvivorsCounter(static_cast<int>( 1568 IncrementYoungSurvivorsCounter(static_cast<int>(
1538 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size())); 1569 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1539 1570
1540 LOG(isolate_, ResourceEvent("scavenge", "end")); 1571 LOG(isolate_, ResourceEvent("scavenge", "end"));
1541 1572
1542 gc_state_ = NOT_IN_GC; 1573 gc_state_ = NOT_IN_GC;
1543 1574
1544 scavenges_since_last_idle_round_++; 1575 scavenges_since_last_idle_round_++;
1545
1546 if (FLAG_trace_track_allocation_sites && allocation_mementos_found_ > 0) {
1547 PrintF("AllocationMementos found during scavenge = %d\n",
1548 allocation_mementos_found_);
1549 }
1550 } 1576 }
1551 1577
1552 1578
1553 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap, 1579 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1554 Object** p) { 1580 Object** p) {
1555 MapWord first_word = HeapObject::cast(*p)->map_word(); 1581 MapWord first_word = HeapObject::cast(*p)->map_word();
1556 1582
1557 if (!first_word.IsForwardingAddress()) { 1583 if (!first_word.IsForwardingAddress()) {
1558 // Unreachable external string can be finalized. 1584 // Unreachable external string can be finalized.
1559 heap->FinalizeExternalString(String::cast(*p)); 1585 heap->FinalizeExternalString(String::cast(*p));
(...skipping 2790 matching lines...)
4350 4376
4351 #ifdef VERIFY_HEAP 4377 #ifdef VERIFY_HEAP
4352 if (FLAG_verify_heap) { 4378 if (FLAG_verify_heap) {
4353 code->Verify(); 4379 code->Verify();
4354 } 4380 }
4355 #endif 4381 #endif
4356 return new_code; 4382 return new_code;
4357 } 4383 }
4358 4384
4359 4385
4386 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
4387 AllocationSite* allocation_site) {
4388 memento->set_map_no_write_barrier(allocation_memento_map());
4389 ASSERT(allocation_site->map() == allocation_site_map());
4390 memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
4391 if (FLAG_allocation_site_pretenuring) {
4392 allocation_site->IncrementMementoCreateCount();
4393 }
4394 }
4395
4396
4360 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space, 4397 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4361 Handle<AllocationSite> allocation_site) { 4398 Handle<AllocationSite> allocation_site) {
4362 ASSERT(gc_state_ == NOT_IN_GC); 4399 ASSERT(gc_state_ == NOT_IN_GC);
4363 ASSERT(map->instance_type() != MAP_TYPE); 4400 ASSERT(map->instance_type() != MAP_TYPE);
4364 // If allocation failures are disallowed, we may allocate in a different 4401 // If allocation failures are disallowed, we may allocate in a different
4365 // space when new space is full and the object is not a large object. 4402 // space when new space is full and the object is not a large object.
4366 AllocationSpace retry_space = 4403 AllocationSpace retry_space =
4367 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type()); 4404 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4368 int size = map->instance_size() + AllocationMemento::kSize; 4405 int size = map->instance_size() + AllocationMemento::kSize;
4369 Object* result; 4406 Object* result;
4370 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space); 4407 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4371 if (!maybe_result->ToObject(&result)) return maybe_result; 4408 if (!maybe_result->ToObject(&result)) return maybe_result;
4372 // No need for write barrier since object is white and map is in old space. 4409 // No need for write barrier since object is white and map is in old space.
4373 HeapObject::cast(result)->set_map_no_write_barrier(map); 4410 HeapObject::cast(result)->set_map_no_write_barrier(map);
4374 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>( 4411 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4375 reinterpret_cast<Address>(result) + map->instance_size()); 4412 reinterpret_cast<Address>(result) + map->instance_size());
4376 alloc_memento->set_map_no_write_barrier(allocation_memento_map()); 4413 InitializeAllocationMemento(alloc_memento, *allocation_site);
4377 ASSERT(allocation_site->map() == allocation_site_map());
4378 alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
4379 return result; 4414 return result;
4380 } 4415 }
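
Note: AllocateWithAllocationSite reserves map->instance_size() + AllocationMemento::kSize bytes in a single raw allocation and carves the memento out of the tail, so the memento needs no separate allocation and no write barrier (the object is freshly allocated). A minimal sketch of the pointer arithmetic with stand-in types; FakeMemento and MementoBehind are illustrative, not V8 API:

#include <cstdint>

// Stand-in for AllocationMemento; only the layout matters here.
struct FakeMemento { void* allocation_site; };

// The raw block is laid out as
//   object -> | object body (instance_size bytes) | memento (kSize bytes) |
// so the memento address is simply object + instance_size.
inline FakeMemento* MementoBehind(void* object, int instance_size) {
  return reinterpret_cast<FakeMemento*>(
      reinterpret_cast<uintptr_t>(object) + instance_size);
}
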
4381 4416
4382 4417
4383 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) { 4418 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4384 ASSERT(gc_state_ == NOT_IN_GC); 4419 ASSERT(gc_state_ == NOT_IN_GC);
4385 ASSERT(map->instance_type() != MAP_TYPE); 4420 ASSERT(map->instance_type() != MAP_TYPE);
4386 // If allocation failures are disallowed, we may allocate in a different 4421 // If allocation failures are disallowed, we may allocate in a different
4387 // space when new space is full and the object is not a large object. 4422 // space when new space is full and the object is not a large object.
4388 AllocationSpace retry_space = 4423 AllocationSpace retry_space =
(...skipping 412 matching lines...)
4801 MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) { 4836 MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
4802 // Never used to copy functions. If functions need to be copied we 4837 // Never used to copy functions. If functions need to be copied we
4803 // have to be careful to clear the literals array. 4838 // have to be careful to clear the literals array.
4804 SLOW_ASSERT(!source->IsJSFunction()); 4839 SLOW_ASSERT(!source->IsJSFunction());
4805 4840
4806 // Make the clone. 4841 // Make the clone.
4807 Map* map = source->map(); 4842 Map* map = source->map();
4808 int object_size = map->instance_size(); 4843 int object_size = map->instance_size();
4809 Object* clone; 4844 Object* clone;
4810 4845
4811 ASSERT(site == NULL || (AllocationSite::CanTrack(map->instance_type()) && 4846 ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type()));
4812 map->instance_type() == JS_ARRAY_TYPE));
4813 4847
4814 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER; 4848 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4815 4849
4816 // If we're forced to always allocate, we use the general allocation 4850 // If we're forced to always allocate, we use the general allocation
4817 // functions which may leave us with an object in old space. 4851 // functions which may leave us with an object in old space.
4818 if (always_allocate()) { 4852 if (always_allocate()) {
4819 { MaybeObject* maybe_clone = 4853 { MaybeObject* maybe_clone =
4820 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE); 4854 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4821 if (!maybe_clone->ToObject(&clone)) return maybe_clone; 4855 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4822 } 4856 }
(...skipping 18 matching lines...)
4841 SLOW_ASSERT(InNewSpace(clone)); 4875 SLOW_ASSERT(InNewSpace(clone));
4842 // Since we know the clone is allocated in new space, we can copy 4876 // Since we know the clone is allocated in new space, we can copy
4843 // the contents without worrying about updating the write barrier. 4877 // the contents without worrying about updating the write barrier.
4844 CopyBlock(HeapObject::cast(clone)->address(), 4878 CopyBlock(HeapObject::cast(clone)->address(),
4845 source->address(), 4879 source->address(),
4846 object_size); 4880 object_size);
4847 4881
4848 if (site != NULL) { 4882 if (site != NULL) {
4849 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>( 4883 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4850 reinterpret_cast<Address>(clone) + object_size); 4884 reinterpret_cast<Address>(clone) + object_size);
4851 alloc_memento->set_map_no_write_barrier(allocation_memento_map()); 4885 InitializeAllocationMemento(alloc_memento, site);
4852 ASSERT(site->map() == allocation_site_map());
4853 alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
4854 HeapProfiler* profiler = isolate()->heap_profiler(); 4886 HeapProfiler* profiler = isolate()->heap_profiler();
4855 if (profiler->is_tracking_allocations()) { 4887 if (profiler->is_tracking_allocations()) {
4856 profiler->UpdateObjectSizeEvent(HeapObject::cast(clone)->address(), 4888 profiler->UpdateObjectSizeEvent(HeapObject::cast(clone)->address(),
4857 object_size); 4889 object_size);
4858 profiler->NewObjectEvent(alloc_memento->address(), 4890 profiler->NewObjectEvent(alloc_memento->address(),
4859 AllocationMemento::kSize); 4891 AllocationMemento::kSize);
4860 } 4892 }
4861 } 4893 }
4862 } 4894 }
4863 4895
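Note: with this change both creation paths route memento setup through InitializeAllocationMemento, i.e. fresh allocations in AllocateWithAllocationSite and clones in CopyJSObject, so memento_create_count is bumped consistently; memento counts are accumulated per site (replacing the removed per-heap allocation_mementos_found_ counter), and the two feed the per-site decision digested in the GC epilogue. A condensed, hypothetical sketch of that counter lifecycle; FakeSite, the 85% survival rule, and the counter reset are assumptions, not taken from the patch:

// Stand-in for the per-site bookkeeping used by this patch.
struct FakeSite {
  int memento_create_count = 0;  // ++ in InitializeAllocationMemento
  int memento_found_count = 0;   // ++ when a memento is seen during a GC
  bool tenure_decided = false;
  bool tenured = false;
};

void OnMementoCreated(FakeSite* site) { site->memento_create_count++; }
void OnMementoFound(FakeSite* site)   { site->memento_found_count++; }

// Hypothetical digest step, run per site in the GC epilogue; returns
// whether a decision was made this cycle (mirroring the boolean use of
// DigestPretenuringFeedback in the patch).
bool Digest(FakeSite* site) {
  if (site->tenure_decided || site->memento_create_count == 0) return false;
  site->tenured =
      100 * site->memento_found_count >= 85 * site->memento_create_count;
  site->tenure_decided = true;
  site->memento_create_count = site->memento_found_count = 0;  // assumed reset
  return true;
}
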
(...skipping 3091 matching lines...)
7955 static_cast<int>(object_sizes_last_time_[index])); 7987 static_cast<int>(object_sizes_last_time_[index]));
7956 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) 7988 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
7957 #undef ADJUST_LAST_TIME_OBJECT_COUNT 7989 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7958 7990
7959 OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); 7991 OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
7960 OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); 7992 OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
7961 ClearObjectStats(); 7993 ClearObjectStats();
7962 } 7994 }
7963 7995
7964 } } // namespace v8::internal 7996 } } // namespace v8::internal