Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 7619: - Removed a few indirections by making the two SemiSpaces... (Closed)
Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 12 years, 2 months ago
 // Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 39 matching lines...)
 
 #define STRUCT_ALLOCATION(NAME, Name, name) Map* Heap::name##_map_;
 STRUCT_LIST(STRUCT_ALLOCATION)
 #undef STRUCT_ALLOCATION
 
 
 #define SYMBOL_ALLOCATION(name, string) String* Heap::name##_;
 SYMBOL_LIST(SYMBOL_ALLOCATION)
 #undef SYMBOL_ALLOCATION
 
-
-NewSpace* Heap::new_space_ = NULL;
+NewSpace Heap::new_space_;
 OldSpace* Heap::old_pointer_space_ = NULL;
 OldSpace* Heap::old_data_space_ = NULL;
 OldSpace* Heap::code_space_ = NULL;
 MapSpace* Heap::map_space_ = NULL;
 LargeObjectSpace* Heap::lo_space_ = NULL;
 
 int Heap::promoted_space_limit_ = 0;
 int Heap::old_gen_exhausted_ = false;
 
 int Heap::amount_of_external_allocated_memory_ = 0;
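
The hunk above is the heart of this CL: new_space_ stops being a heap-allocated pointer and becomes a static value member, so every access saves one load and the NULL bookkeeping disappears. A minimal sketch of the pattern, with illustrative class names rather than V8's real declarations:

    class NewSpace {
     public:
      int Capacity() const { return capacity_; }
     private:
      int capacity_ = 0;
    };

    // Before: one indirection per access, plus 'new' in Setup() and
    // 'delete' in TearDown().
    struct HeapWithPointer {
      static NewSpace* new_space_;
      static int Capacity() { return new_space_->Capacity(); }
    };
    NewSpace* HeapWithPointer::new_space_ = nullptr;

    // After: the space is embedded by value; member calls compile to direct
    // accesses, and TearDown() no longer needs a delete or a NULL check.
    struct HeapWithValue {
      static NewSpace new_space_;
      static int Capacity() { return new_space_.Capacity(); }
    };
    NewSpace HeapWithValue::new_space_;
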
(...skipping 24 matching lines...)
 bool Heap::allocation_allowed_ = true;
 
 int Heap::allocation_timeout_ = 0;
 bool Heap::disallow_allocation_failure_ = false;
 #endif  // DEBUG
 
 
 int Heap::Capacity() {
   if (!HasBeenSetup()) return 0;
 
-  return new_space_->Capacity() +
+  return new_space_.Capacity() +
       old_pointer_space_->Capacity() +
       old_data_space_->Capacity() +
       code_space_->Capacity() +
       map_space_->Capacity();
 }
 
 
 int Heap::Available() {
   if (!HasBeenSetup()) return 0;
 
-  return new_space_->Available() +
+  return new_space_.Available() +
       old_pointer_space_->Available() +
       old_data_space_->Available() +
       code_space_->Available() +
       map_space_->Available();
 }
 
 
 bool Heap::HasBeenSetup() {
-  return new_space_ != NULL &&
-      old_pointer_space_ != NULL &&
+  return old_pointer_space_ != NULL &&
       old_data_space_ != NULL &&
       code_space_ != NULL &&
       map_space_ != NULL &&
       lo_space_ != NULL;
 }
 
 
 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
   // Is global GC requested?
   if (space != NEW_SPACE || FLAG_gc_global) {
(...skipping 16 matching lines...)
 
   // Is there enough space left in OLD to guarantee that a scavenge can
   // succeed?
   //
   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
   // for object promotion. It counts only the bytes that the memory
   // allocator has not yet allocated from the OS and assigned to any space,
   // and does not count available bytes already in the old space or code
   // space.  Undercounting is safe---we may get an unrequested full GC when
   // a scavenge would have succeeded.
-  if (MemoryAllocator::MaxAvailable() <= new_space_->Size()) {
+  if (MemoryAllocator::MaxAvailable() <= new_space_.Size()) {
     Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
     return MARK_COMPACTOR;
   }
 
   // Default
   return SCAVENGER;
 }
 
 
 // TODO(1238405): Combine the infrastructure for --heap-stats and
 // --log-gc to avoid the complicated preprocessor and flag testing.
 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
 void Heap::ReportStatisticsBeforeGC() {
   // Heap::ReportHeapStatistics will also log NewSpace statistics when
   // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set.  The
   // following logic is used to avoid double logging.
 #if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
-  if (FLAG_heap_stats || FLAG_log_gc) new_space_->CollectStatistics();
+  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
   if (FLAG_heap_stats) {
     ReportHeapStatistics("Before GC");
   } else if (FLAG_log_gc) {
-    new_space_->ReportStatistics();
+    new_space_.ReportStatistics();
   }
-  if (FLAG_heap_stats || FLAG_log_gc) new_space_->ClearHistograms();
+  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
 #elif defined(DEBUG)
   if (FLAG_heap_stats) {
-    new_space_->CollectStatistics();
+    new_space_.CollectStatistics();
     ReportHeapStatistics("Before GC");
-    new_space_->ClearHistograms();
+    new_space_.ClearHistograms();
   }
 #elif defined(ENABLE_LOGGING_AND_PROFILING)
   if (FLAG_log_gc) {
-    new_space_->CollectStatistics();
-    new_space_->ReportStatistics();
-    new_space_->ClearHistograms();
+    new_space_.CollectStatistics();
+    new_space_.ReportStatistics();
+    new_space_.ClearHistograms();
   }
 #endif
 }
 
 
 // TODO(1238405): Combine the infrastructure for --heap-stats and
 // --log-gc to avoid the complicated preprocessor and flag testing.
 void Heap::ReportStatisticsAfterGC() {
   // Similar to the before GC, we use some complicated logic to ensure that
   // NewSpace statistics are logged exactly once when --log-gc is turned on.
 #if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
   if (FLAG_heap_stats) {
     ReportHeapStatistics("After GC");
   } else if (FLAG_log_gc) {
-    new_space_->ReportStatistics();
+    new_space_.ReportStatistics();
   }
 #elif defined(DEBUG)
   if (FLAG_heap_stats) ReportHeapStatistics("After GC");
 #elif defined(ENABLE_LOGGING_AND_PROFILING)
-  if (FLAG_log_gc) new_space_->ReportStatistics();
+  if (FLAG_log_gc) new_space_.ReportStatistics();
 #endif
 }
 #endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
 
 
 void Heap::GarbageCollectionPrologue() {
   RegExpImpl::NewSpaceCollectionPrologue();
   gc_count_++;
 #ifdef DEBUG
   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
(...skipping 92 matching lines...)
     GarbageCollectionEpilogue();
   }
 
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log_gc) HeapProfiler::WriteSample();
 #endif
 
   switch (space) {
     case NEW_SPACE:
-      return new_space_->Available() >= requested_size;
+      return new_space_.Available() >= requested_size;
     case OLD_POINTER_SPACE:
       return old_pointer_space_->Available() >= requested_size;
     case OLD_DATA_SPACE:
       return old_data_space_->Available() >= requested_size;
     case CODE_SPACE:
       return code_space_->Available() >= requested_size;
     case MAP_SPACE:
       return map_space_->Available() >= requested_size;
     case LO_SPACE:
       return lo_space_->Available() >= requested_size;
(...skipping 111 matching lines...)
     CopyObject(p);
   }
 
   void VisitPointers(Object** start, Object** end) {
     // Copy all HeapObject pointers in [start, end)
     for (Object** p = start; p < end; p++) CopyObject(p);
   }
 
  private:
   void CopyObject(Object** p) {
-    if (!Heap::InFromSpace(*p)) return;
+    if (!Heap::InNewSpace(*p)) return;
     Heap::CopyObject(reinterpret_cast<HeapObject**>(p));
   }
 };
 
 
 // Shared state read by the scavenge collector and set by CopyObject.
 static Address promoted_top = NULL;
 
 
 #ifdef DEBUG
(...skipping 28 matching lines...)
   }
 }
 #endif
 
   gc_state_ = SCAVENGE;
 
   // Implements Cheney's copying algorithm
   LOG(ResourceEvent("scavenge", "begin"));
 
   scavenge_count_++;
-  if (new_space_->Capacity() < new_space_->MaximumCapacity() &&
+  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
       scavenge_count_ > new_space_growth_limit_) {
     // Double the size of the new space, and double the limit.  The next
     // doubling attempt will occur after the current new_space_growth_limit_
     // more collections.
     // TODO(1240712): NewSpace::Double has a return value which is
     // ignored here.
-    new_space_->Double();
+    new_space_.Double();
     new_space_growth_limit_ *= 2;
   }
 
   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
-  new_space_->Flip();
-  new_space_->ResetAllocationInfo();
+  new_space_.Flip();
+  new_space_.ResetAllocationInfo();
 
   // We need to sweep newly copied objects which can be in either the to space
   // or the old space.  For to space objects, we use a mark.  Newly copied
   // objects lie between the mark and the allocation top.  For objects
   // promoted to old space, we write their addresses downward from the top of
   // the new space.  Sweeping newly promoted objects requires an allocation
   // pointer and a mark.  Note that the allocation pointer 'top' actually
   // moves downward from the high address in the to space.
   //
   // There is guaranteed to be enough room at the top of the to space for the
   // addresses of promoted objects: every object promoted frees up its size in
   // bytes from the top of the new space, and objects are at least one pointer
   // in size.  Using the new space to record promoted addresses makes the
   // scavenge collector agnostic to the allocation strategy (eg, linear or
   // free-list) used in old space.
-  Address new_mark = new_space_->ToSpaceLow();
-  Address promoted_mark = new_space_->ToSpaceHigh();
-  promoted_top = new_space_->ToSpaceHigh();
+  Address new_mark = new_space_.ToSpaceLow();
+  Address promoted_mark = new_space_.ToSpaceHigh();
+  promoted_top = new_space_.ToSpaceHigh();
 
   CopyVisitor copy_visitor;
   // Copy roots.
   IterateRoots(&copy_visitor);
 
   // Copy objects reachable from the old generation.  By definition, there
   // are no intergenerational pointers in code or data spaces.
   IterateRSet(old_pointer_space_, &CopyObject);
   IterateRSet(map_space_, &CopyObject);
   lo_space_->IterateRSet(&CopyObject);
 
   bool has_processed_weak_pointers = false;
 
   while (true) {
-    ASSERT(new_mark <= new_space_->top());
+    ASSERT(new_mark <= new_space_.top());
     ASSERT(promoted_mark >= promoted_top);
 
     // Copy objects reachable from newly copied objects.
-    while (new_mark < new_space_->top() || promoted_mark > promoted_top) {
+    while (new_mark < new_space_.top() || promoted_mark > promoted_top) {
       // Sweep newly copied objects in the to space.  The allocation pointer
       // can change during sweeping.
-      Address previous_top = new_space_->top();
-      SemiSpaceIterator new_it(new_space_, new_mark);
+      Address previous_top = new_space_.top();
+      SemiSpaceIterator new_it(new_space(), new_mark);
       while (new_it.has_next()) {
         new_it.next()->Iterate(&copy_visitor);
       }
       new_mark = previous_top;
 
       // Sweep newly copied objects in the old space.  The promotion 'top'
       // pointer could change during sweeping.
       previous_top = promoted_top;
       for (Address current = promoted_mark - kPointerSize;
            current >= previous_top;
            current -= kPointerSize) {
         HeapObject* object = HeapObject::cast(Memory::Object_at(current));
         object->Iterate(&copy_visitor);
         UpdateRSet(object);
       }
       promoted_mark = previous_top;
     }
 
     if (has_processed_weak_pointers) break;  // We are done.
     // Copy objects reachable from weak pointers.
     GlobalHandles::IterateWeakRoots(&copy_visitor);
     has_processed_weak_pointers = true;
   }
 
   // Set age mark.
-  new_space_->set_age_mark(new_mark);
+  new_space_.set_age_mark(new_mark);
 
   LOG(ResourceEvent("scavenge", "end"));
 
   gc_state_ = NOT_IN_GC;
 }
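
The loop above is Cheney's algorithm with two extensions: a second "promotion" queue written downward from the top of to-space, and a weak-pointer pass. Stripped of both, the core algorithm fits in a few lines. A self-contained toy sketch follows; its object layout (a size word followed by pointer fields, with at least two words per object so a forwarding address fits) is an assumption for illustration, not V8's layout:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Toy Cheney scavenger. An "object" is a block of words: word 0 holds
    // the size in words, the rest are fields that hold 0 or point to another
    // object. Every object is at least two words, so after evacuation word 0
    // can be zeroed as a "forwarded" marker and word 1 holds the new address.
    using Word = uintptr_t;

    static Word* Evacuate(Word* obj, Word** top) {
      if (obj[0] == 0) return reinterpret_cast<Word*>(obj[1]);  // forwarded
      Word size = obj[0];
      Word* target = *top;
      std::memcpy(target, obj, size * sizeof(Word));  // copy to to-space
      *top += size;
      obj[0] = 0;                               // mark old copy as forwarded
      obj[1] = reinterpret_cast<Word>(target);  // install forwarding address
      return target;
    }

    static void Scavenge(std::vector<Word*>* roots, Word* to_space) {
      Word* top = to_space;  // allocation pointer in to-space
      for (Word*& root : *roots) root = Evacuate(root, &top);
      // Cheney scan: 'scan' chases 'top'; visiting fields may grow 'top'.
      for (Word* scan = to_space; scan < top; scan += scan[0]) {
        for (Word i = 1; i < scan[0]; i++) {
          if (scan[i] != 0) {
            scan[i] = reinterpret_cast<Word>(
                Evacuate(reinterpret_cast<Word*>(scan[i]), &top));
          }
        }
      }
    }
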
 
 
 void Heap::ClearRSetRange(Address start, int size_in_bytes) {
   uint32_t start_bit;
   Address start_word_address =
(...skipping 106 matching lines...)
 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
 void Heap::RecordCopiedObject(HeapObject* obj) {
   bool should_record = false;
 #ifdef DEBUG
   should_record = FLAG_heap_stats;
 #endif
 #ifdef ENABLE_LOGGING_AND_PROFILING
   should_record = should_record || FLAG_log_gc;
 #endif
   if (should_record) {
-    if (new_space_->Contains(obj)) {
-      new_space_->RecordAllocation(obj);
+    if (new_space_.Contains(obj)) {
+      new_space_.RecordAllocation(obj);
     } else {
-      new_space_->RecordPromotion(obj);
+      new_space_.RecordPromotion(obj);
     }
   }
 }
 #endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
730 728
731 HeapObject* Heap::MigrateObject(HeapObject** source_p, 729 HeapObject* Heap::MigrateObject(HeapObject* source,
732 HeapObject* target, 730 HeapObject* target,
733 int size) { 731 int size) {
734 void** src = reinterpret_cast<void**>((*source_p)->address()); 732 void** src = reinterpret_cast<void**>(source->address());
735 void** dst = reinterpret_cast<void**>(target->address()); 733 void** dst = reinterpret_cast<void**>(target->address());
736 734
737 // Use block copying memcpy if the object we're migrating is big 735 // Use block copying memcpy if the object we're migrating is big
738 // enough to justify the extra call/setup overhead. 736 // enough to justify the extra call/setup overhead.
739 static const int kBlockCopyLimit = 16 * kPointerSize; 737 static const int kBlockCopyLimit = 16 * kPointerSize;
740 738
741 if (size >= kBlockCopyLimit) { 739 if (size >= kBlockCopyLimit) {
742 memcpy(dst, src, size); 740 memcpy(dst, src, size);
743 } else { 741 } else {
744 int remaining = size / kPointerSize; 742 int remaining = size / kPointerSize;
745 do { 743 do {
746 remaining--; 744 remaining--;
747 *dst++ = *src++; 745 *dst++ = *src++;
748 } while (remaining > 0); 746 } while (remaining > 0);
749 } 747 }
750 748
751 // Set the forwarding address. 749 // Set the forwarding address.
752 (*source_p)->set_map_word(MapWord::FromForwardingAddress(target)); 750 source->set_map_word(MapWord::FromForwardingAddress(target));
753 751
754 // Update NewSpace stats if necessary. 752 // Update NewSpace stats if necessary.
755 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 753 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
756 RecordCopiedObject(target); 754 RecordCopiedObject(target);
757 #endif 755 #endif
758 756
759 return target; 757 return target;
760 } 758 }
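
MigrateObject's copy path above picks between memcpy and an inline word loop based on a size threshold, so small objects skip the call/setup overhead. The heuristic in isolation, as a standalone sketch (the 16-word kBlockCopyLimit comes from the code above; 'size' is assumed to be a positive multiple of the word size):

    #include <cstring>

    static const int kPointerSize = sizeof(void*);
    static const int kBlockCopyLimit = 16 * kPointerSize;

    // Copy 'size' bytes (a positive multiple of kPointerSize) from src to
    // dst. Large blocks use memcpy; small ones use an inline word loop.
    static void CopyBlock(void** dst, void** src, int size) {
      if (size >= kBlockCopyLimit) {
        std::memcpy(dst, src, size);
      } else {
        int remaining = size / kPointerSize;
        do {
          remaining--;
          *dst++ = *src++;
        } while (remaining > 0);
      }
    }
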
 
 
(...skipping 19 matching lines...)
   // Heap::empty_string().  We do not use object->IsConsString because we
   // already know that object has the heap object tag.
   InstanceType type = first_word.ToMap()->instance_type();
   if (type < FIRST_NONSTRING_TYPE &&
       String::cast(object)->representation_tag() == kConsStringTag &&
       ConsString::cast(object)->second() == Heap::empty_string()) {
     object = HeapObject::cast(ConsString::cast(object)->first());
     *p = object;
     // After patching *p we have to repeat the checks that object is in the
     // active semispace of the young generation and not already copied.
-    if (!InFromSpace(object)) return;
+    if (!InNewSpace(object)) return;
     first_word = object->map_word();
     if (first_word.IsForwardingAddress()) {
       *p = first_word.ToForwardingAddress();
       return;
     }
     type = first_word.ToMap()->instance_type();
   }
 
   int object_size = object->SizeFromMap(first_word.ToMap());
   Object* result;
   // If the object should be promoted, we try to copy it to old space.
   if (ShouldBePromoted(object->address(), object_size)) {
     OldSpace* target_space = Heap::TargetSpace(object);
     ASSERT(target_space == Heap::old_pointer_space_ ||
            target_space == Heap::old_data_space_);
     result = target_space->AllocateRaw(object_size);
 
     if (!result->IsFailure()) {
-      *p = MigrateObject(p, HeapObject::cast(result), object_size);
+      *p = MigrateObject(object, HeapObject::cast(result), object_size);
       if (target_space == Heap::old_pointer_space_) {
         // Record the object's address at the top of the to space, to allow
         // it to be swept by the scavenger.
         promoted_top -= kPointerSize;
         Memory::Object_at(promoted_top) = *p;
       } else {
 #ifdef DEBUG
         // Objects promoted to the data space should not have pointers to
         // new space.
         VerifyNonPointerSpacePointersVisitor v;
         (*p)->Iterate(&v);
 #endif
       }
       return;
     }
   }
 
   // The object should remain in new space or the old space allocation failed.
-  result = new_space_->AllocateRaw(object_size);
+  result = new_space_.AllocateRaw(object_size);
   // Failed allocation at this point is utterly unexpected.
   ASSERT(!result->IsFailure());
-  *p = MigrateObject(p, HeapObject::cast(result), object_size);
+  *p = MigrateObject(object, HeapObject::cast(result), object_size);
 }
 
 
 Object* Heap::AllocatePartialMap(InstanceType instance_type,
                                  int instance_size) {
   Object* result = AllocateRawMap(Map::kSize);
   if (result->IsFailure()) return result;
 
   // Map::cast cannot be used due to uninitialized map field.
   reinterpret_cast<Map*>(result)->set_map(meta_map());
(...skipping 179 matching lines...)
   HeapNumber::cast(result)->set_value(value);
   return result;
 }
 
 
 Object* Heap::AllocateHeapNumber(double value) {
   // This version of AllocateHeapNumber is optimized for
   // allocation in new space.
   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
-  Object* result = new_space_->AllocateRaw(HeapNumber::kSize);
+  Object* result = new_space_.AllocateRaw(HeapNumber::kSize);
   if (result->IsFailure()) return result;
   HeapObject::cast(result)->set_map(heap_number_map());
   HeapNumber::cast(result)->set_value(value);
   return result;
 }
 
 
 Object* Heap::CreateOddball(Map* map,
                             const char* to_string,
                             Object* to_number) {
(...skipping 1140 matching lines...)
   PrintF("promoted_space_limit_ %d\n", promoted_space_limit_);
 
   PrintF("\n");
   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
   GlobalHandles::PrintStats();
   PrintF("\n");
 
   PrintF("Heap statistics : ");
   MemoryAllocator::ReportStatistics();
   PrintF("To space : ");
-  new_space_->ReportStatistics();
+  new_space_.ReportStatistics();
   PrintF("Old pointer space : ");
   old_pointer_space_->ReportStatistics();
   PrintF("Old data space : ");
   old_data_space_->ReportStatistics();
   PrintF("Code space : ");
   code_space_->ReportStatistics();
   PrintF("Map space : ");
   map_space_->ReportStatistics();
   PrintF("Large object space : ");
   lo_space_->ReportStatistics();
   PrintF(">>>>>> ========================================= >>>>>>\n");
 }
 
 #endif  // DEBUG
 
 bool Heap::Contains(HeapObject* value) {
   return Contains(value->address());
 }
 
 
 bool Heap::Contains(Address addr) {
   if (OS::IsOutsideAllocatedSpace(addr)) return false;
   return HasBeenSetup() &&
-    (new_space_->ToSpaceContains(addr) ||
+    (new_space_.ToSpaceContains(addr) ||
      old_pointer_space_->Contains(addr) ||
      old_data_space_->Contains(addr) ||
      code_space_->Contains(addr) ||
      map_space_->Contains(addr) ||
      lo_space_->SlowContains(addr));
 }
 
 
 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
   return InSpace(value->address(), space);
 }
 
 
 bool Heap::InSpace(Address addr, AllocationSpace space) {
   if (OS::IsOutsideAllocatedSpace(addr)) return false;
   if (!HasBeenSetup()) return false;
 
   switch (space) {
     case NEW_SPACE:
-      return new_space_->ToSpaceContains(addr);
+      return new_space_.ToSpaceContains(addr);
     case OLD_POINTER_SPACE:
       return old_pointer_space_->Contains(addr);
     case OLD_DATA_SPACE:
       return old_data_space_->Contains(addr);
     case CODE_SPACE:
       return code_space_->Contains(addr);
     case MAP_SPACE:
       return map_space_->Contains(addr);
     case LO_SPACE:
       return lo_space_->SlowContains(addr);
(...skipping 47 matching lines...)
     return true;
   }
   SymbolTable* table = SymbolTable::cast(symbol_table_);
   return table->LookupSymbolIfExists(string, symbol);
 }
 
 
 #ifdef DEBUG
 void Heap::ZapFromSpace() {
   ASSERT(HAS_HEAP_OBJECT_TAG(kFromSpaceZapValue));
-  for (Address a = new_space_->FromSpaceLow();
-       a < new_space_->FromSpaceHigh();
+  for (Address a = new_space_.FromSpaceLow();
+       a < new_space_.FromSpaceHigh();
        a += kPointerSize) {
     Memory::Address_at(a) = kFromSpaceZapValue;
   }
 }
 #endif  // DEBUG
 
 
 void Heap::IterateRSetRange(Address object_start,
                             Address object_end,
                             Address rset_start,
                             ObjectSlotCallback copy_object_func) {
   Address object_address = object_start;
   Address rset_address = rset_start;
 
   // Loop over all the pointers in [object_start, object_end).
   while (object_address < object_end) {
     uint32_t rset_word = Memory::uint32_at(rset_address);
-
     if (rset_word != 0) {
-      // Bits were set.
       uint32_t result_rset = rset_word;
-
-      // Loop over all the bits in the remembered set word.  Though
-      // remembered sets are sparse, faster (eg, binary) search for
-      // set bits does not seem to help much here.
-      for (int bit_offset = 0; bit_offset < kBitsPerInt; bit_offset++) {
-        uint32_t bitmask = 1 << bit_offset;
+      for(uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) {

Kevin Millikin (Chromium) 2008/10/17 09:02:41: Need a space after 'for'.

         // Do not dereference pointers at or past object_end.
         if ((rset_word & bitmask) != 0 && object_address < object_end) {
           Object** object_p = reinterpret_cast<Object**>(object_address);
-          if (Heap::InFromSpace(*object_p)) {
+          if (Heap::InNewSpace(*object_p)) {
             copy_object_func(reinterpret_cast<HeapObject**>(object_p));
           }
           // If this pointer does not need to be remembered anymore, clear
           // the remembered set bit.
-          if (!Heap::InToSpace(*object_p)) result_rset &= ~bitmask;
+          if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask;
         }
         object_address += kPointerSize;
       }
-
       // Update the remembered set if it has changed.
       if (result_rset != rset_word) {
         Memory::uint32_at(rset_address) = result_rset;
       }
     } else {
       // No bits in the word were set.  This is the common case.
       object_address += kPointerSize * kBitsPerInt;
     }
-
     rset_address += kIntSize;
   }
 }
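
The rewritten loop that the review comment above refers to walks all 32 bits by shifting a one-bit mask left until it overflows to zero, dropping the separate bit_offset counter the old code kept. The idiom in isolation (a standalone example, not V8 code):

    #include <cstdint>
    #include <cstdio>

    // Visit every set bit of a 32-bit remembered-set word. The mask starts
    // at bit 0 and shifts left once per iteration; after bit 31 the shift
    // overflows to 0 (well defined for unsigned types), ending the loop.
    static void VisitSetBits(uint32_t word) {
      int index = 0;
      for (uint32_t bitmask = 1; bitmask != 0; bitmask <<= 1, index++) {
        if (word & bitmask) {
          std::printf("bit %d is set\n", index);
        }
      }
    }

    int main() {
      VisitSetBits(0x80000005u);  // prints bits 0, 2, and 31
      return 0;
    }
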
 
 
 void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
   ASSERT(Page::is_rset_in_use());
   ASSERT(space == old_pointer_space_ || space == map_space_);
 
   PageIterator it(space, PageIterator::PAGES_IN_USE);
(...skipping 143 matching lines...)
   // code space.  Align the pair of semispaces to their size, which must be
   // a power of 2.
   ASSERT(IsPowerOf2(young_generation_size_));
   Address code_space_start = reinterpret_cast<Address>(chunk);
   Address new_space_start = RoundUp(code_space_start, young_generation_size_);
   Address old_space_start = new_space_start + young_generation_size_;
   int code_space_size = new_space_start - code_space_start;
   int old_space_size = young_generation_size_ - code_space_size;
 
   // Initialize new space.
-  new_space_ = new NewSpace(initial_semispace_size_,
-                            semispace_size_,
-                            NEW_SPACE);
-  if (new_space_ == NULL) return false;
-  if (!new_space_->Setup(new_space_start, young_generation_size_)) return false;
+  if (!new_space_.Setup(new_space_start, young_generation_size_)) return false;
 
   // Initialize old space, set the maximum capacity to the old generation
   // size.  It will not contain code.
   old_pointer_space_ =
       new OldSpace(old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
   if (old_pointer_space_ == NULL) return false;
   if (!old_pointer_space_->Setup(old_space_start, old_space_size >> 1)) {
     return false;
   }
   old_data_space_ =
(...skipping 37 matching lines...)
   LOG(IntEvent("heap-capacity", Capacity()));
   LOG(IntEvent("heap-available", Available()));
 
   return true;
 }
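
Setup above carves one reserved chunk into code space, the semispace pair, and old space, rounding the new-space start up to the young-generation size (a power of two). A sketch of the address arithmetic (standalone; the membership test in the final comment is an illustration of why the alignment matters, not code from this file):

    #include <cstdint>

    using Address = uintptr_t;

    // Round 'a' up to 'alignment', which must be a power of two.
    static Address RoundUp(Address a, Address alignment) {
      return (a + alignment - 1) & ~(alignment - 1);
    }

    struct Layout {
      Address code_space_start, new_space_start, old_space_start;
      Address code_space_size, old_space_size;
    };

    // Mirrors the computation above: [code space][new space][old space].
    static Layout ComputeLayout(Address chunk, Address young_generation_size) {
      Layout l;
      l.code_space_start = chunk;
      l.new_space_start = RoundUp(l.code_space_start, young_generation_size);
      l.old_space_start = l.new_space_start + young_generation_size;
      l.code_space_size = l.new_space_start - l.code_space_start;
      l.old_space_size = young_generation_size - l.code_space_size;
      // Because new_space_start is aligned to young_generation_size, an
      // address p lies in the young generation iff
      //   (p & ~(young_generation_size - 1)) == new_space_start.
      return l;
    }
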
 
 
 void Heap::TearDown() {
   GlobalHandles::TearDown();
 
-  if (new_space_ != NULL) {
-    new_space_->TearDown();
-    delete new_space_;
-    new_space_ = NULL;
-  }
+  new_space_.TearDown();
 
   if (old_pointer_space_ != NULL) {
     old_pointer_space_->TearDown();
     delete old_pointer_space_;
     old_pointer_space_ = NULL;
   }
 
   if (old_data_space_ != NULL) {
     old_data_space_->TearDown();
     delete old_data_space_;
(...skipping 487 matching lines...)
3084 return "Scavenge"; 3065 return "Scavenge";
3085 case MARK_COMPACTOR: 3066 case MARK_COMPACTOR:
3086 return MarkCompactCollector::HasCompacted() ? "Mark-compact" 3067 return MarkCompactCollector::HasCompacted() ? "Mark-compact"
3087 : "Mark-sweep"; 3068 : "Mark-sweep";
3088 } 3069 }
3089 return "Unknown GC"; 3070 return "Unknown GC";
3090 } 3071 }
3091 3072
3092 3073
3093 } } // namespace v8::internal 3074 } } // namespace v8::internal