OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/accessors.h" | 7 #include "src/accessors.h" |
8 #include "src/api.h" | 8 #include "src/api.h" |
9 #include "src/base/once.h" | 9 #include "src/base/once.h" |
10 #include "src/base/utils/random-number-generator.h" | 10 #include "src/base/utils/random-number-generator.h" |
(...skipping 127 matching lines...)
138 chunks_queued_for_free_(NULL), | 138 chunks_queued_for_free_(NULL), |
139 gc_callbacks_depth_(0) { | 139 gc_callbacks_depth_(0) { |
140 // Allow build-time customization of the max semispace size. Building | 140 // Allow build-time customization of the max semispace size. Building |
141 // V8 with snapshots and a non-default max semispace size is much | 141 // V8 with snapshots and a non-default max semispace size is much |
142 // easier if you can define it as part of the build environment. | 142 // easier if you can define it as part of the build environment. |
143 #if defined(V8_MAX_SEMISPACE_SIZE) | 143 #if defined(V8_MAX_SEMISPACE_SIZE) |
144 max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE; | 144 max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE; |
145 #endif | 145 #endif |
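The build-time override above is ordinary preprocessor machinery; here is a minimal sketch of the same pattern under stated assumptions (the fallback value is illustrative, and V8's real default is configured elsewhere):

    #include <cstdio>

    // Fallback for this sketch only; compiling the translation unit with
    // -DV8_MAX_SEMISPACE_SIZE=8388608 (for example) overrides it, which is
    // exactly how the constructor above picks the value up.
    #ifndef V8_MAX_SEMISPACE_SIZE
    #define V8_MAX_SEMISPACE_SIZE (4 * 1024 * 1024)
    #endif

    int main() {
      std::printf("max semispace size: %d bytes\n", V8_MAX_SEMISPACE_SIZE);
      return 0;
    }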
146 | 146 |
147 // Ensure old_generation_size_ is a multiple of kPageSize. | 147 // Ensure old_generation_size_ is a multiple of kPageSize. |
148 ASSERT(MB >= Page::kPageSize); | 148 DCHECK(MB >= Page::kPageSize); |
149 | 149 |
150 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength); | 150 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength); |
151 set_native_contexts_list(NULL); | 151 set_native_contexts_list(NULL); |
152 set_array_buffers_list(Smi::FromInt(0)); | 152 set_array_buffers_list(Smi::FromInt(0)); |
153 set_allocation_sites_list(Smi::FromInt(0)); | 153 set_allocation_sites_list(Smi::FromInt(0)); |
154 set_encountered_weak_collections(Smi::FromInt(0)); | 154 set_encountered_weak_collections(Smi::FromInt(0)); |
155 // Put a dummy entry in the remembered pages so we can find the list in the | 155 // Put a dummy entry in the remembered pages so we can find the list in the |
156 // minidump even if there are no real unmapped pages. | 156 // minidump even if there are no real unmapped pages. |
157 RememberUnmappedPage(NULL, false); | 157 RememberUnmappedPage(NULL, false); |
158 | 158 |
(...skipping 270 matching lines...)
429 // Reset GC statistics. | 429 // Reset GC statistics. |
430 promoted_objects_size_ = 0; | 430 promoted_objects_size_ = 0; |
431 semi_space_copied_object_size_ = 0; | 431 semi_space_copied_object_size_ = 0; |
432 nodes_died_in_new_space_ = 0; | 432 nodes_died_in_new_space_ = 0; |
433 nodes_copied_in_new_space_ = 0; | 433 nodes_copied_in_new_space_ = 0; |
434 nodes_promoted_ = 0; | 434 nodes_promoted_ = 0; |
435 | 435 |
436 UpdateMaximumCommitted(); | 436 UpdateMaximumCommitted(); |
437 | 437 |
438 #ifdef DEBUG | 438 #ifdef DEBUG |
439 ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC); | 439 DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC); |
440 | 440 |
441 if (FLAG_gc_verbose) Print(); | 441 if (FLAG_gc_verbose) Print(); |
442 | 442 |
443 ReportStatisticsBeforeGC(); | 443 ReportStatisticsBeforeGC(); |
444 #endif // DEBUG | 444 #endif // DEBUG |
445 | 445 |
446 store_buffer()->GCPrologue(); | 446 store_buffer()->GCPrologue(); |
447 | 447 |
448 if (isolate()->concurrent_osr_enabled()) { | 448 if (isolate()->concurrent_osr_enabled()) { |
449 isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs(); | 449 isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs(); |
(...skipping 383 matching lines...)
833 } | 833 } |
834 collector = SCAVENGER; | 834 collector = SCAVENGER; |
835 collector_reason = "incremental marking delaying mark-sweep"; | 835 collector_reason = "incremental marking delaying mark-sweep"; |
836 } | 836 } |
837 } | 837 } |
838 | 838 |
839 bool next_gc_likely_to_collect_more = false; | 839 bool next_gc_likely_to_collect_more = false; |
840 | 840 |
841 { | 841 { |
842 tracer()->Start(collector, gc_reason, collector_reason); | 842 tracer()->Start(collector, gc_reason, collector_reason); |
843 ASSERT(AllowHeapAllocation::IsAllowed()); | 843 DCHECK(AllowHeapAllocation::IsAllowed()); |
844 DisallowHeapAllocation no_allocation_during_gc; | 844 DisallowHeapAllocation no_allocation_during_gc; |
845 GarbageCollectionPrologue(); | 845 GarbageCollectionPrologue(); |
846 | 846 |
847 { | 847 { |
848 HistogramTimerScope histogram_timer_scope( | 848 HistogramTimerScope histogram_timer_scope( |
849 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger() | 849 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger() |
850 : isolate_->counters()->gc_compactor()); | 850 : isolate_->counters()->gc_compactor()); |
851 next_gc_likely_to_collect_more = | 851 next_gc_likely_to_collect_more = |
852 PerformGarbageCollection(collector, gc_callback_flags); | 852 PerformGarbageCollection(collector, gc_callback_flags); |
853 } | 853 } |
(...skipping 25 matching lines...)
879 return ++contexts_disposed_; | 879 return ++contexts_disposed_; |
880 } | 880 } |
881 | 881 |
882 | 882 |
883 void Heap::MoveElements(FixedArray* array, | 883 void Heap::MoveElements(FixedArray* array, |
884 int dst_index, | 884 int dst_index, |
885 int src_index, | 885 int src_index, |
886 int len) { | 886 int len) { |
887 if (len == 0) return; | 887 if (len == 0) return; |
888 | 888 |
889 ASSERT(array->map() != fixed_cow_array_map()); | 889 DCHECK(array->map() != fixed_cow_array_map()); |
890 Object** dst_objects = array->data_start() + dst_index; | 890 Object** dst_objects = array->data_start() + dst_index; |
891 MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize); | 891 MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize); |
892 if (!InNewSpace(array)) { | 892 if (!InNewSpace(array)) { |
893 for (int i = 0; i < len; i++) { | 893 for (int i = 0; i < len; i++) { |
894 // TODO(hpayer): check store buffer for entries | 894 // TODO(hpayer): check store buffer for entries |
895 if (InNewSpace(dst_objects[i])) { | 895 if (InNewSpace(dst_objects[i])) { |
896 RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i)); | 896 RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i)); |
897 } | 897 } |
898 } | 898 } |
899 } | 899 } |
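MoveElements needs this barrier because the scavenger only scans new space plus remembered old-to-new slots, so any slot in an old-space array that comes to hold a new-space pointer must be recorded. A stand-in model of that invariant (none of these types are the real V8 API):

    #include <cstring>
    #include <vector>

    struct Obj { bool in_new_space; };

    // Stand-ins for Heap::InNewSpace and the store buffer.
    bool InNewSpace(Obj* o) { return o != nullptr && o->in_new_space; }
    std::vector<Obj**> store_buffer_slots;

    void MoveElementsModel(Obj** data, int dst, int src, int len,
                           bool array_in_new_space) {
      if (len == 0) return;
      // Source and destination may overlap, exactly like the MemMove above.
      std::memmove(data + dst, data + src, len * sizeof(Obj*));
      if (!array_in_new_space) {
        // Record each moved slot that now points into new space; otherwise
        // the scavenger would never find the referenced object.
        for (int i = 0; i < len; i++) {
          if (InNewSpace(data[dst + i])) {
            store_buffer_slots.push_back(&data[dst + i]);
          }
        }
      }
    }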
(...skipping 35 matching lines...)
935 return result; | 935 return result; |
936 } | 936 } |
937 | 937 |
938 | 938 |
939 void Heap::ReserveSpace(int *sizes, Address *locations_out) { | 939 void Heap::ReserveSpace(int *sizes, Address *locations_out) { |
940 bool gc_performed = true; | 940 bool gc_performed = true; |
941 int counter = 0; | 941 int counter = 0; |
942 static const int kThreshold = 20; | 942 static const int kThreshold = 20; |
943 while (gc_performed && counter++ < kThreshold) { | 943 while (gc_performed && counter++ < kThreshold) { |
944 gc_performed = false; | 944 gc_performed = false; |
945 ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1); | 945 DCHECK(NEW_SPACE == FIRST_PAGED_SPACE - 1); |
946 for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) { | 946 for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) { |
947 if (sizes[space] != 0) { | 947 if (sizes[space] != 0) { |
948 AllocationResult allocation; | 948 AllocationResult allocation; |
949 if (space == NEW_SPACE) { | 949 if (space == NEW_SPACE) { |
950 allocation = new_space()->AllocateRaw(sizes[space]); | 950 allocation = new_space()->AllocateRaw(sizes[space]); |
951 } else { | 951 } else { |
952 allocation = paged_space(space)->AllocateRaw(sizes[space]); | 952 allocation = paged_space(space)->AllocateRaw(sizes[space]); |
953 } | 953 } |
954 FreeListNode* node; | 954 FreeListNode* node; |
955 if (!allocation.To(&node)) { | 955 if (!allocation.To(&node)) { |
(...skipping 149 matching lines...)
1105 } else { | 1105 } else { |
1106 Scavenge(); | 1106 Scavenge(); |
1107 } | 1107 } |
1108 | 1108 |
1109 UpdateSurvivalStatistics(start_new_space_size); | 1109 UpdateSurvivalStatistics(start_new_space_size); |
1110 | 1110 |
1111 isolate_->counters()->objs_since_last_young()->Set(0); | 1111 isolate_->counters()->objs_since_last_young()->Set(0); |
1112 | 1112 |
1113 // Callbacks that fire after this point might trigger nested GCs and | 1113 // Callbacks that fire after this point might trigger nested GCs and |
1114 // restart incremental marking, the assertion can't be moved down. | 1114 // restart incremental marking, the assertion can't be moved down. |
1115 ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped()); | 1115 DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped()); |
1116 | 1116 |
1117 gc_post_processing_depth_++; | 1117 gc_post_processing_depth_++; |
1118 { AllowHeapAllocation allow_allocation; | 1118 { AllowHeapAllocation allow_allocation; |
1119 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); | 1119 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); |
1120 freed_global_handles = | 1120 freed_global_handles = |
1121 isolate_->global_handles()->PostGarbageCollectionProcessing(collector); | 1121 isolate_->global_handles()->PostGarbageCollectionProcessing(collector); |
1122 } | 1122 } |
1123 gc_post_processing_depth_--; | 1123 gc_post_processing_depth_--; |
1124 | 1124 |
1125 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this); | 1125 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this); |
(...skipping 220 matching lines...)
1346 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) { | 1346 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) { |
1347 // Did we find too many pointers in the previous page? The heuristic is | 1347 // Did we find too many pointers in the previous page? The heuristic is |
1348 // that no page can take more than 1/5 the remaining slots in the store | 1348 // that no page can take more than 1/5 the remaining slots in the store |
1349 // buffer. | 1349 // buffer. |
1350 current_page_->set_scan_on_scavenge(true); | 1350 current_page_->set_scan_on_scavenge(true); |
1351 store_buffer_->SetTop(start_of_current_page_); | 1351 store_buffer_->SetTop(start_of_current_page_); |
1352 } else { | 1352 } else { |
1353 // In this case the page we scanned took a reasonable number of slots in | 1353 // In this case the page we scanned took a reasonable number of slots in |
1354 // the store buffer. It has now been rehabilitated and is no longer | 1354 // the store buffer. It has now been rehabilitated and is no longer |
1355 // marked scan_on_scavenge. | 1355 // marked scan_on_scavenge. |
1356 ASSERT(!current_page_->scan_on_scavenge()); | 1356 DCHECK(!current_page_->scan_on_scavenge()); |
1357 } | 1357 } |
1358 } | 1358 } |
1359 start_of_current_page_ = store_buffer_->Top(); | 1359 start_of_current_page_ = store_buffer_->Top(); |
1360 current_page_ = page; | 1360 current_page_ = page; |
1361 } else if (event == kStoreBufferFullEvent) { | 1361 } else if (event == kStoreBufferFullEvent) { |
1362 // The current page overflowed the store buffer again. Wipe out its entries | 1362 // The current page overflowed the store buffer again. Wipe out its entries |
1363 // in the store buffer and mark it scan-on-scavenge again. This may happen | 1363 // in the store buffer and mark it scan-on-scavenge again. This may happen |
1364 // several times while scanning. | 1364 // several times while scanning. |
1365 if (current_page_ == NULL) { | 1365 if (current_page_ == NULL) { |
1366 // The store buffer overflowed while scanning promoted objects. These are not | 1366 // The store buffer overflowed while scanning promoted objects. These are not |
1367 // in any particular page, though they are likely to be clustered by the | 1367 // in any particular page, though they are likely to be clustered by the |
1368 // allocation routines. | 1368 // allocation routines. |
1369 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2); | 1369 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2); |
1370 } else { | 1370 } else { |
1371 // The store buffer overflowed while scanning a particular old space page for | 1371 // The store buffer overflowed while scanning a particular old space page for |
1372 // pointers to new space. | 1372 // pointers to new space. |
1373 ASSERT(current_page_ == page); | 1373 DCHECK(current_page_ == page); |
1374 ASSERT(page != NULL); | 1374 DCHECK(page != NULL); |
1375 current_page_->set_scan_on_scavenge(true); | 1375 current_page_->set_scan_on_scavenge(true); |
1376 ASSERT(start_of_current_page_ != store_buffer_->Top()); | 1376 DCHECK(start_of_current_page_ != store_buffer_->Top()); |
1377 store_buffer_->SetTop(start_of_current_page_); | 1377 store_buffer_->SetTop(start_of_current_page_); |
1378 } | 1378 } |
1379 } else { | 1379 } else { |
1380 UNREACHABLE(); | 1380 UNREACHABLE(); |
1381 } | 1381 } |
1382 } | 1382 } |
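The "1/5" comment and the `>> 2` agree: used > remaining/4 is the same as used > (used + remaining)/5. A small restatement with stand-in fields (not the real StoreBuffer interface):

    #include <cstdint>

    struct StoreBufferModel {
      intptr_t* start_of_current_page;
      intptr_t* top;
      intptr_t* limit;

      // If the page just scanned consumed more than a fifth of the buffer
      // (used > remaining/4), its entries are rolled back and the whole
      // page is marked to be re-scanned at scavenge time instead.
      bool ShouldFlipToScanOnScavenge() const {
        intptr_t used_by_page = top - start_of_current_page;
        intptr_t remaining = limit - top;
        return used_by_page > (remaining >> 2);
      }
    };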
1383 | 1383 |
1384 | 1384 |
1385 void PromotionQueue::Initialize() { | 1385 void PromotionQueue::Initialize() { |
1386 // Assumes that a NewSpacePage exactly fits a number of promotion queue | 1386 // Assumes that a NewSpacePage exactly fits a number of promotion queue |
1387 // entries (where each is a pair of intptr_t). This allows us to simplify | 1387 // entries (where each is a pair of intptr_t). This allows us to simplify |
1388 // the test for when to switch pages. | 1388 // the test for when to switch pages. |
1389 ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) | 1389 DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) |
1390 == 0); | 1390 == 0); |
1391 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart()); | 1391 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart()); |
1392 front_ = rear_ = | 1392 front_ = rear_ = |
1393 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd()); | 1393 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd()); |
1394 emergency_stack_ = NULL; | 1394 emergency_stack_ = NULL; |
1395 guard_ = false; | 1395 guard_ = false; |
1396 } | 1396 } |
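The page-tiling assumption can be restated as a compile-time check; the constants below are illustrative stand-ins for the real values in spaces.h:

    #include <cstdint>

    constexpr int kPointerSize = sizeof(intptr_t);
    constexpr int kPageSize = 1 << 20;             // illustrative
    constexpr int kBodyOffset = 8 * kPointerSize;  // illustrative
    constexpr int kEntrySizeInWords = 2;           // an (object, size) pair
    constexpr int kEntrySize = kEntrySizeInWords * kPointerSize;

    // If the usable body of a page is an exact number of entries, the queue
    // can detect a page switch with a single pointer comparison and never
    // splits an entry across two pages.
    static_assert((kPageSize - kBodyOffset) % kEntrySize == 0,
                  "promotion queue entries must tile a page body exactly");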
1397 | 1397 |
1398 | 1398 |
1399 void PromotionQueue::RelocateQueueHead() { | 1399 void PromotionQueue::RelocateQueueHead() { |
1400 ASSERT(emergency_stack_ == NULL); | 1400 DCHECK(emergency_stack_ == NULL); |
1401 | 1401 |
1402 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_)); | 1402 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_)); |
1403 intptr_t* head_start = rear_; | 1403 intptr_t* head_start = rear_; |
1404 intptr_t* head_end = | 1404 intptr_t* head_end = |
1405 Min(front_, reinterpret_cast<intptr_t*>(p->area_end())); | 1405 Min(front_, reinterpret_cast<intptr_t*>(p->area_end())); |
1406 | 1406 |
1407 int entries_count = | 1407 int entries_count = |
1408 static_cast<int>(head_end - head_start) / kEntrySizeInWords; | 1408 static_cast<int>(head_end - head_start) / kEntrySizeInWords; |
1409 | 1409 |
1410 emergency_stack_ = new List<Entry>(2 * entries_count); | 1410 emergency_stack_ = new List<Entry>(2 * entries_count); |
(...skipping 146 matching lines...)
1557 UpdateNewSpaceReferencesInExternalStringTable( | 1557 UpdateNewSpaceReferencesInExternalStringTable( |
1558 &UpdateNewSpaceReferenceInExternalStringTableEntry); | 1558 &UpdateNewSpaceReferenceInExternalStringTableEntry); |
1559 | 1559 |
1560 promotion_queue_.Destroy(); | 1560 promotion_queue_.Destroy(); |
1561 | 1561 |
1562 incremental_marking()->UpdateMarkingDequeAfterScavenge(); | 1562 incremental_marking()->UpdateMarkingDequeAfterScavenge(); |
1563 | 1563 |
1564 ScavengeWeakObjectRetainer weak_object_retainer(this); | 1564 ScavengeWeakObjectRetainer weak_object_retainer(this); |
1565 ProcessWeakReferences(&weak_object_retainer); | 1565 ProcessWeakReferences(&weak_object_retainer); |
1566 | 1566 |
1567 ASSERT(new_space_front == new_space_.top()); | 1567 DCHECK(new_space_front == new_space_.top()); |
1568 | 1568 |
1569 // Set age mark. | 1569 // Set age mark. |
1570 new_space_.set_age_mark(new_space_.top()); | 1570 new_space_.set_age_mark(new_space_.top()); |
1571 | 1571 |
1572 new_space_.LowerInlineAllocationLimit( | 1572 new_space_.LowerInlineAllocationLimit( |
1573 new_space_.inline_allocation_limit_step()); | 1573 new_space_.inline_allocation_limit_step()); |
1574 | 1574 |
1575 // Update how much has survived scavenge. | 1575 // Update how much has survived scavenge. |
1576 IncrementYoungSurvivorsCounter(static_cast<int>( | 1576 IncrementYoungSurvivorsCounter(static_cast<int>( |
1577 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size())); | 1577 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size())); |
(...skipping 29 matching lines...)
1607 } | 1607 } |
1608 #endif | 1608 #endif |
1609 | 1609 |
1610 if (external_string_table_.new_space_strings_.is_empty()) return; | 1610 if (external_string_table_.new_space_strings_.is_empty()) return; |
1611 | 1611 |
1612 Object** start = &external_string_table_.new_space_strings_[0]; | 1612 Object** start = &external_string_table_.new_space_strings_[0]; |
1613 Object** end = start + external_string_table_.new_space_strings_.length(); | 1613 Object** end = start + external_string_table_.new_space_strings_.length(); |
1614 Object** last = start; | 1614 Object** last = start; |
1615 | 1615 |
1616 for (Object** p = start; p < end; ++p) { | 1616 for (Object** p = start; p < end; ++p) { |
1617 ASSERT(InFromSpace(*p)); | 1617 DCHECK(InFromSpace(*p)); |
1618 String* target = updater_func(this, p); | 1618 String* target = updater_func(this, p); |
1619 | 1619 |
1620 if (target == NULL) continue; | 1620 if (target == NULL) continue; |
1621 | 1621 |
1622 ASSERT(target->IsExternalString()); | 1622 DCHECK(target->IsExternalString()); |
1623 | 1623 |
1624 if (InNewSpace(target)) { | 1624 if (InNewSpace(target)) { |
1625 // String is still in new space. Update the table entry. | 1625 // String is still in new space. Update the table entry. |
1626 *last = target; | 1626 *last = target; |
1627 ++last; | 1627 ++last; |
1628 } else { | 1628 } else { |
1629 // String got promoted. Move it to the old string list. | 1629 // String got promoted. Move it to the old string list. |
1630 external_string_table_.AddOldString(target); | 1630 external_string_table_.AddOldString(target); |
1631 } | 1631 } |
1632 } | 1632 } |
1633 | 1633 |
1634 ASSERT(last <= end); | 1634 DCHECK(last <= end); |
1635 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start)); | 1635 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start)); |
1636 } | 1636 } |
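The loop above is the classic in-place keep-or-promote filter, with `last` as the write cursor. The same shape in miniature, with stand-in predicates rather than V8 calls:

    #include <cstddef>
    #include <vector>

    template <typename T, typename KeepPred, typename PromoteFn>
    void CompactInPlace(std::vector<T>& items, KeepPred keep, PromoteFn promote) {
      std::size_t last = 0;
      for (std::size_t i = 0; i < items.size(); ++i) {
        if (keep(items[i])) {
          items[last++] = items[i];  // still in new space: keep in this table
        } else {
          promote(items[i]);         // promoted: goes to the old-strings list
        }
      }
      items.resize(last);            // analogous to ShrinkNewStrings
    }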
1637 | 1637 |
1638 | 1638 |
1639 void Heap::UpdateReferencesInExternalStringTable( | 1639 void Heap::UpdateReferencesInExternalStringTable( |
1640 ExternalStringTableUpdaterCallback updater_func) { | 1640 ExternalStringTableUpdaterCallback updater_func) { |
1641 | 1641 |
1642 // Update old space string references. | 1642 // Update old space string references. |
1643 if (external_string_table_.old_space_strings_.length() > 0) { | 1643 if (external_string_table_.old_space_strings_.length() > 0) { |
1644 Object** start = &external_string_table_.old_space_strings_[0]; | 1644 Object** start = &external_string_table_.old_space_strings_[0]; |
(...skipping 87 matching lines...)
1732 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) { | 1732 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) { |
1733 DisallowHeapAllocation no_allocation; | 1733 DisallowHeapAllocation no_allocation; |
1734 // All external strings are listed in the external string table. | 1734 // All external strings are listed in the external string table. |
1735 | 1735 |
1736 class ExternalStringTableVisitorAdapter : public ObjectVisitor { | 1736 class ExternalStringTableVisitorAdapter : public ObjectVisitor { |
1737 public: | 1737 public: |
1738 explicit ExternalStringTableVisitorAdapter( | 1738 explicit ExternalStringTableVisitorAdapter( |
1739 v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {} | 1739 v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {} |
1740 virtual void VisitPointers(Object** start, Object** end) { | 1740 virtual void VisitPointers(Object** start, Object** end) { |
1741 for (Object** p = start; p < end; p++) { | 1741 for (Object** p = start; p < end; p++) { |
1742 ASSERT((*p)->IsExternalString()); | 1742 DCHECK((*p)->IsExternalString()); |
1743 visitor_->VisitExternalString(Utils::ToLocal( | 1743 visitor_->VisitExternalString(Utils::ToLocal( |
1744 Handle<String>(String::cast(*p)))); | 1744 Handle<String>(String::cast(*p)))); |
1745 } | 1745 } |
1746 } | 1746 } |
1747 private: | 1747 private: |
1748 v8::ExternalResourceVisitor* visitor_; | 1748 v8::ExternalResourceVisitor* visitor_; |
1749 } external_string_table_visitor(visitor); | 1749 } external_string_table_visitor(visitor); |
1750 | 1750 |
1751 external_string_table_.Iterate(&external_string_table_visitor); | 1751 external_string_table_.Iterate(&external_string_table_visitor); |
1752 } | 1752 } |
(...skipping 35 matching lines...)
1788 &ScavengeStoreBufferCallback); | 1788 &ScavengeStoreBufferCallback); |
1789 while (!promotion_queue()->is_empty()) { | 1789 while (!promotion_queue()->is_empty()) { |
1790 HeapObject* target; | 1790 HeapObject* target; |
1791 int size; | 1791 int size; |
1792 promotion_queue()->remove(&target, &size); | 1792 promotion_queue()->remove(&target, &size); |
1793 | 1793 |
1794 // Promoted object might already be partially visited | 1794 // Promoted object might already be partially visited |
1795 // during old space pointer iteration. Thus we search specifically | 1795 // during old space pointer iteration. Thus we search specifically |
1796 // for pointers to from semispace instead of looking for pointers | 1796 // for pointers to from semispace instead of looking for pointers |
1797 // to new space. | 1797 // to new space. |
1798 ASSERT(!target->IsMap()); | 1798 DCHECK(!target->IsMap()); |
1799 IterateAndMarkPointersToFromSpace(target->address(), | 1799 IterateAndMarkPointersToFromSpace(target->address(), |
1800 target->address() + size, | 1800 target->address() + size, |
1801 &ScavengeObject); | 1801 &ScavengeObject); |
1802 } | 1802 } |
1803 } | 1803 } |
1804 | 1804 |
1805 // Take another spin if there are now unswept objects in new space | 1805 // Take another spin if there are now unswept objects in new space |
1806 // (there are currently no more unswept promoted objects). | 1806 // (there are currently no more unswept promoted objects). |
1807 } while (new_space_front != new_space_.top()); | 1807 } while (new_space_front != new_space_.top()); |
1808 | 1808 |
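The do/while above is a Cheney-style fixpoint: copying objects during the sweep allocates more unswept objects, so the scan front chases the allocation top until they meet. A self-contained miniature of that control flow (all types are stand-ins):

    #include <cstddef>
    #include <queue>
    #include <vector>

    struct MiniObject {
      std::vector<MiniObject*> slots;
      MiniObject* forward = nullptr;  // forwarding address once copied
    };

    struct MiniScavenger {
      std::vector<MiniObject*> to_space;  // copied objects, allocation order
      std::size_t scan_front = 0;         // analogous to new_space_front
      std::queue<MiniObject*> promoted;   // analogous to the promotion queue

      MiniObject* Copy(MiniObject* o) {
        if (o->forward != nullptr) return o->forward;  // already evacuated
        MiniObject* target = new MiniObject(*o);       // evacuate
        o->forward = target;                           // leave forwarding ptr
        to_space.push_back(target);                    // advances the "top"
        return target;
      }

      void DrainToFixpoint() {
        do {
          // Sweep the unswept tail of to-space; Copy() may grow it further.
          while (scan_front != to_space.size()) {
            MiniObject* obj = to_space[scan_front++];
            for (MiniObject*& slot : obj->slots) slot = Copy(slot);
          }
          // Drain promoted objects, which may copy more into to-space.
          while (!promoted.empty()) {
            MiniObject* obj = promoted.front();
            promoted.pop();
            for (MiniObject*& slot : obj->slots) slot = Copy(slot);
          }
        } while (scan_front != to_space.size());  // take another spin
      }
    };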
(...skipping 136 matching lines...)
1945 // Helper function used by CopyObject to copy a source object to an | 1945 // Helper function used by CopyObject to copy a source object to an |
1946 // allocated target object and update the forwarding pointer in the source | 1946 // allocated target object and update the forwarding pointer in the source |
1947 // object. | 1947 // object. |
1948 INLINE(static void MigrateObject(Heap* heap, | 1948 INLINE(static void MigrateObject(Heap* heap, |
1949 HeapObject* source, | 1949 HeapObject* source, |
1950 HeapObject* target, | 1950 HeapObject* target, |
1951 int size)) { | 1951 int size)) { |
1952 // If we migrate into to-space, then the to-space top pointer should be | 1952 // If we migrate into to-space, then the to-space top pointer should be |
1953 // right after the target object. Incorporate double alignment | 1953 // right after the target object. Incorporate double alignment |
1954 // over-allocation. | 1954 // over-allocation. |
1955 ASSERT(!heap->InToSpace(target) || | 1955 DCHECK(!heap->InToSpace(target) || |
1956 target->address() + size == heap->new_space()->top() || | 1956 target->address() + size == heap->new_space()->top() || |
1957 target->address() + size + kPointerSize == heap->new_space()->top()); | 1957 target->address() + size + kPointerSize == heap->new_space()->top()); |
1958 | 1958 |
1959 // Make sure that we do not overwrite the promotion queue which is at | 1959 // Make sure that we do not overwrite the promotion queue which is at |
1960 // the end of to-space. | 1960 // the end of to-space. |
1961 ASSERT(!heap->InToSpace(target) || | 1961 DCHECK(!heap->InToSpace(target) || |
1962 heap->promotion_queue()->IsBelowPromotionQueue( | 1962 heap->promotion_queue()->IsBelowPromotionQueue( |
1963 heap->new_space()->top())); | 1963 heap->new_space()->top())); |
1964 | 1964 |
1965 // Copy the content of source to target. | 1965 // Copy the content of source to target. |
1966 heap->CopyBlock(target->address(), source->address(), size); | 1966 heap->CopyBlock(target->address(), source->address(), size); |
1967 | 1967 |
1968 // Set the forwarding address. | 1968 // Set the forwarding address. |
1969 source->set_map_word(MapWord::FromForwardingAddress(target)); | 1969 source->set_map_word(MapWord::FromForwardingAddress(target)); |
1970 | 1970 |
1971 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) { | 1971 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) { |
(...skipping 11 matching lines...)
1983 | 1983 |
1984 template<int alignment> | 1984 template<int alignment> |
1985 static inline bool SemiSpaceCopyObject(Map* map, | 1985 static inline bool SemiSpaceCopyObject(Map* map, |
1986 HeapObject** slot, | 1986 HeapObject** slot, |
1987 HeapObject* object, | 1987 HeapObject* object, |
1988 int object_size) { | 1988 int object_size) { |
1989 Heap* heap = map->GetHeap(); | 1989 Heap* heap = map->GetHeap(); |
1990 | 1990 |
1991 int allocation_size = object_size; | 1991 int allocation_size = object_size; |
1992 if (alignment != kObjectAlignment) { | 1992 if (alignment != kObjectAlignment) { |
1993 ASSERT(alignment == kDoubleAlignment); | 1993 DCHECK(alignment == kDoubleAlignment); |
1994 allocation_size += kPointerSize; | 1994 allocation_size += kPointerSize; |
1995 } | 1995 } |
1996 | 1996 |
1997 ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE)); | 1997 DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE)); |
1998 AllocationResult allocation = | 1998 AllocationResult allocation = |
1999 heap->new_space()->AllocateRaw(allocation_size); | 1999 heap->new_space()->AllocateRaw(allocation_size); |
2000 | 2000 |
2001 HeapObject* target = NULL; // Initialization to please compiler. | 2001 HeapObject* target = NULL; // Initialization to please compiler. |
2002 if (allocation.To(&target)) { | 2002 if (allocation.To(&target)) { |
2003 if (alignment != kObjectAlignment) { | 2003 if (alignment != kObjectAlignment) { |
2004 target = EnsureDoubleAligned(heap, target, allocation_size); | 2004 target = EnsureDoubleAligned(heap, target, allocation_size); |
2005 } | 2005 } |
2006 | 2006 |
2007 // Order is important here: Set the promotion limit before migrating | 2007 // Order is important here: Set the promotion limit before migrating |
(...skipping 16 matching lines...)
2024 | 2024 |
2025 template<ObjectContents object_contents, int alignment> | 2025 template<ObjectContents object_contents, int alignment> |
2026 static inline bool PromoteObject(Map* map, | 2026 static inline bool PromoteObject(Map* map, |
2027 HeapObject** slot, | 2027 HeapObject** slot, |
2028 HeapObject* object, | 2028 HeapObject* object, |
2029 int object_size) { | 2029 int object_size) { |
2030 Heap* heap = map->GetHeap(); | 2030 Heap* heap = map->GetHeap(); |
2031 | 2031 |
2032 int allocation_size = object_size; | 2032 int allocation_size = object_size; |
2033 if (alignment != kObjectAlignment) { | 2033 if (alignment != kObjectAlignment) { |
2034 ASSERT(alignment == kDoubleAlignment); | 2034 DCHECK(alignment == kDoubleAlignment); |
2035 allocation_size += kPointerSize; | 2035 allocation_size += kPointerSize; |
2036 } | 2036 } |
2037 | 2037 |
2038 AllocationResult allocation; | 2038 AllocationResult allocation; |
2039 if (object_contents == DATA_OBJECT) { | 2039 if (object_contents == DATA_OBJECT) { |
2040 ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); | 2040 DCHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); |
2041 allocation = heap->old_data_space()->AllocateRaw(allocation_size); | 2041 allocation = heap->old_data_space()->AllocateRaw(allocation_size); |
2042 } else { | 2042 } else { |
2043 ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE)); | 2043 DCHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE)); |
2044 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size); | 2044 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size); |
2045 } | 2045 } |
2046 | 2046 |
2047 HeapObject* target = NULL; // Initialization to please compiler. | 2047 HeapObject* target = NULL; // Initialization to please compiler. |
2048 if (allocation.To(&target)) { | 2048 if (allocation.To(&target)) { |
2049 if (alignment != kObjectAlignment) { | 2049 if (alignment != kObjectAlignment) { |
2050 target = EnsureDoubleAligned(heap, target, allocation_size); | 2050 target = EnsureDoubleAligned(heap, target, allocation_size); |
2051 } | 2051 } |
2052 | 2052 |
2053 // Order is important: slot might be inside of the target if target | 2053 // Order is important: slot might be inside of the target if target |
(...skipping 15 matching lines...)
2069 } | 2069 } |
2070 return false; | 2070 return false; |
2071 } | 2071 } |
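The `allocation_size += kPointerSize` above pays for the alignment fix-up: one spare word lets EnsureDoubleAligned place a filler before or after the payload. A hedged model of that trick (illustrative constants, filler writes elided):

    #include <cstdint>

    constexpr uintptr_t kPtr = 4;          // 32-bit word, where this matters
    constexpr uintptr_t kDoubleAlign = 8;

    // Given a raw allocation of object_size + kPtr bytes, return the start
    // at which the object payload is 8-byte aligned.
    uintptr_t EnsureDoubleAlignedModel(uintptr_t raw_start) {
      if (raw_start % kDoubleAlign == 0) {
        return raw_start;       // aligned: spare word becomes a trailing filler
      }
      return raw_start + kPtr;  // misaligned: a one-word filler goes first
    }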
2072 | 2072 |
2073 | 2073 |
2074 template<ObjectContents object_contents, int alignment> | 2074 template<ObjectContents object_contents, int alignment> |
2075 static inline void EvacuateObject(Map* map, | 2075 static inline void EvacuateObject(Map* map, |
2076 HeapObject** slot, | 2076 HeapObject** slot, |
2077 HeapObject* object, | 2077 HeapObject* object, |
2078 int object_size) { | 2078 int object_size) { |
2079 SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); | 2079 SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); |
2080 SLOW_ASSERT(object->Size() == object_size); | 2080 SLOW_DCHECK(object->Size() == object_size); |
2081 Heap* heap = map->GetHeap(); | 2081 Heap* heap = map->GetHeap(); |
2082 | 2082 |
2083 if (!heap->ShouldBePromoted(object->address(), object_size)) { | 2083 if (!heap->ShouldBePromoted(object->address(), object_size)) { |
2084 // A semi-space copy may fail due to fragmentation. In that case, we | 2084 // A semi-space copy may fail due to fragmentation. In that case, we |
2085 // try to promote the object. | 2085 // try to promote the object. |
2086 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) { | 2086 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) { |
2087 return; | 2087 return; |
2088 } | 2088 } |
2089 } | 2089 } |
2090 | 2090 |
(...skipping 93 matching lines...)
2184 int object_size = SeqTwoByteString::cast(object)-> | 2184 int object_size = SeqTwoByteString::cast(object)-> |
2185 SeqTwoByteStringSize(map->instance_type()); | 2185 SeqTwoByteStringSize(map->instance_type()); |
2186 EvacuateObject<DATA_OBJECT, kObjectAlignment>( | 2186 EvacuateObject<DATA_OBJECT, kObjectAlignment>( |
2187 map, slot, object, object_size); | 2187 map, slot, object, object_size); |
2188 } | 2188 } |
2189 | 2189 |
2190 | 2190 |
2191 static inline void EvacuateShortcutCandidate(Map* map, | 2191 static inline void EvacuateShortcutCandidate(Map* map, |
2192 HeapObject** slot, | 2192 HeapObject** slot, |
2193 HeapObject* object) { | 2193 HeapObject* object) { |
2194 ASSERT(IsShortcutCandidate(map->instance_type())); | 2194 DCHECK(IsShortcutCandidate(map->instance_type())); |
2195 | 2195 |
2196 Heap* heap = map->GetHeap(); | 2196 Heap* heap = map->GetHeap(); |
2197 | 2197 |
2198 if (marks_handling == IGNORE_MARKS && | 2198 if (marks_handling == IGNORE_MARKS && |
2199 ConsString::cast(object)->unchecked_second() == | 2199 ConsString::cast(object)->unchecked_second() == |
2200 heap->empty_string()) { | 2200 heap->empty_string()) { |
2201 HeapObject* first = | 2201 HeapObject* first = |
2202 HeapObject::cast(ConsString::cast(object)->unchecked_first()); | 2202 HeapObject::cast(ConsString::cast(object)->unchecked_first()); |
2203 | 2203 |
2204 *slot = first; | 2204 *slot = first; |
(...skipping 99 matching lines...)
2304 scavenging_visitors_table_.Register( | 2304 scavenging_visitors_table_.Register( |
2305 StaticVisitorBase::kVisitShortcutCandidate, | 2305 StaticVisitorBase::kVisitShortcutCandidate, |
2306 scavenging_visitors_table_.GetVisitorById( | 2306 scavenging_visitors_table_.GetVisitorById( |
2307 StaticVisitorBase::kVisitConsString)); | 2307 StaticVisitorBase::kVisitConsString)); |
2308 } | 2308 } |
2309 } | 2309 } |
2310 } | 2310 } |
2311 | 2311 |
2312 | 2312 |
2313 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) { | 2313 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) { |
2314 SLOW_ASSERT(object->GetIsolate()->heap()->InFromSpace(object)); | 2314 SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object)); |
2315 MapWord first_word = object->map_word(); | 2315 MapWord first_word = object->map_word(); |
2316 SLOW_ASSERT(!first_word.IsForwardingAddress()); | 2316 SLOW_DCHECK(!first_word.IsForwardingAddress()); |
2317 Map* map = first_word.ToMap(); | 2317 Map* map = first_word.ToMap(); |
2318 map->GetHeap()->DoScavengeObject(map, p, object); | 2318 map->GetHeap()->DoScavengeObject(map, p, object); |
2319 } | 2319 } |
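ScavengeObjectSlow can assert the map word is not yet a forwarding address because MigrateObject overwrote it for every already-copied object. A sketch of that overloading; the tag encoding is illustrative, not V8's exact scheme:

    #include <cstdint>

    // Heap pointers are aligned, so a low bit is free to distinguish
    // "still points at a Map" from "already forwarded".
    struct MapWordModel {
      uintptr_t value;

      bool IsForwardingAddress() const { return (value & 1) != 0; }

      static MapWordModel FromForwardingAddress(uintptr_t target) {
        return MapWordModel{target | 1};
      }

      uintptr_t ToForwardingAddress() const { return value & ~uintptr_t{1}; }
    };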
2320 | 2320 |
2321 | 2321 |
2322 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type, | 2322 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type, |
2323 int instance_size) { | 2323 int instance_size) { |
2324 Object* result; | 2324 Object* result; |
2325 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE); | 2325 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE); |
2326 if (!allocation.To(&result)) return allocation; | 2326 if (!allocation.To(&result)) return allocation; |
(...skipping 52 matching lines...)
2379 | 2379 |
2380 AllocationResult Heap::AllocateFillerObject(int size, | 2380 AllocationResult Heap::AllocateFillerObject(int size, |
2381 bool double_align, | 2381 bool double_align, |
2382 AllocationSpace space) { | 2382 AllocationSpace space) { |
2383 HeapObject* obj; | 2383 HeapObject* obj; |
2384 { AllocationResult allocation = AllocateRaw(size, space, space); | 2384 { AllocationResult allocation = AllocateRaw(size, space, space); |
2385 if (!allocation.To(&obj)) return allocation; | 2385 if (!allocation.To(&obj)) return allocation; |
2386 } | 2386 } |
2387 #ifdef DEBUG | 2387 #ifdef DEBUG |
2388 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); | 2388 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); |
2389 ASSERT(chunk->owner()->identity() == space); | 2389 DCHECK(chunk->owner()->identity() == space); |
2390 #endif | 2390 #endif |
2391 CreateFillerObjectAt(obj->address(), size); | 2391 CreateFillerObjectAt(obj->address(), size); |
2392 return obj; | 2392 return obj; |
2393 } | 2393 } |
2394 | 2394 |
2395 | 2395 |
2396 const Heap::StringTypeTable Heap::string_type_table[] = { | 2396 const Heap::StringTypeTable Heap::string_type_table[] = { |
2397 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \ | 2397 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \ |
2398 {type, size, k##camel_name##MapRootIndex}, | 2398 {type, size, k##camel_name##MapRootIndex}, |
2399 STRING_TYPE_LIST(STRING_TYPE_ELEMENT) | 2399 STRING_TYPE_LIST(STRING_TYPE_ELEMENT) |
(...skipping 53 matching lines...)
2453 if (!allocation.To(&obj)) return false; | 2453 if (!allocation.To(&obj)) return false; |
2454 } | 2454 } |
2455 set_null_value(Oddball::cast(obj)); | 2455 set_null_value(Oddball::cast(obj)); |
2456 Oddball::cast(obj)->set_kind(Oddball::kNull); | 2456 Oddball::cast(obj)->set_kind(Oddball::kNull); |
2457 | 2457 |
2458 { AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE); | 2458 { AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE); |
2459 if (!allocation.To(&obj)) return false; | 2459 if (!allocation.To(&obj)) return false; |
2460 } | 2460 } |
2461 set_undefined_value(Oddball::cast(obj)); | 2461 set_undefined_value(Oddball::cast(obj)); |
2462 Oddball::cast(obj)->set_kind(Oddball::kUndefined); | 2462 Oddball::cast(obj)->set_kind(Oddball::kUndefined); |
2463 ASSERT(!InNewSpace(undefined_value())); | 2463 DCHECK(!InNewSpace(undefined_value())); |
2464 | 2464 |
2465 // Set preliminary exception sentinel value before actually initializing it. | 2465 // Set preliminary exception sentinel value before actually initializing it. |
2466 set_exception(null_value()); | 2466 set_exception(null_value()); |
2467 | 2467 |
2468 // Allocate the empty descriptor array. | 2468 // Allocate the empty descriptor array. |
2469 { AllocationResult allocation = AllocateEmptyFixedArray(); | 2469 { AllocationResult allocation = AllocateEmptyFixedArray(); |
2470 if (!allocation.To(&obj)) return false; | 2470 if (!allocation.To(&obj)) return false; |
2471 } | 2471 } |
2472 set_empty_descriptor_array(DescriptorArray::cast(obj)); | 2472 set_empty_descriptor_array(DescriptorArray::cast(obj)); |
2473 | 2473 |
(...skipping 51 matching lines...)
2525 #define ALLOCATE_MAP(instance_type, size, field_name) \ | 2525 #define ALLOCATE_MAP(instance_type, size, field_name) \ |
2526 { Map* map; \ | 2526 { Map* map; \ |
2527 if (!AllocateMap((instance_type), size).To(&map)) return false; \ | 2527 if (!AllocateMap((instance_type), size).To(&map)) return false; \ |
2528 set_##field_name##_map(map); \ | 2528 set_##field_name##_map(map); \ |
2529 } | 2529 } |
2530 | 2530 |
2531 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \ | 2531 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \ |
2532 ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name) | 2532 ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name) |
2533 | 2533 |
2534 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array) | 2534 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array) |
2535 ASSERT(fixed_array_map() != fixed_cow_array_map()); | 2535 DCHECK(fixed_array_map() != fixed_cow_array_map()); |
2536 | 2536 |
2537 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info) | 2537 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info) |
2538 ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number) | 2538 ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number) |
2539 ALLOCATE_MAP( | 2539 ALLOCATE_MAP( |
2540 MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize, mutable_heap_number) | 2540 MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize, mutable_heap_number) |
2541 ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol) | 2541 ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol) |
2542 ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign) | 2542 ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign) |
2543 | 2543 |
2544 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole); | 2544 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole); |
2545 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean); | 2545 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean); |
(...skipping 103 matching lines...)
2649 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \ | 2649 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \ |
2650 { FixedTypedArrayBase* obj; \ | 2650 { FixedTypedArrayBase* obj; \ |
2651 if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \ | 2651 if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \ |
2652 return false; \ | 2652 return false; \ |
2653 set_empty_fixed_##type##_array(obj); \ | 2653 set_empty_fixed_##type##_array(obj); \ |
2654 } | 2654 } |
2655 | 2655 |
2656 TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY) | 2656 TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY) |
2657 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY | 2657 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY |
2658 } | 2658 } |
2659 ASSERT(!InNewSpace(empty_fixed_array())); | 2659 DCHECK(!InNewSpace(empty_fixed_array())); |
2660 return true; | 2660 return true; |
2661 } | 2661 } |
2662 | 2662 |
2663 | 2663 |
2664 AllocationResult Heap::AllocateHeapNumber(double value, | 2664 AllocationResult Heap::AllocateHeapNumber(double value, |
2665 MutableMode mode, | 2665 MutableMode mode, |
2666 PretenureFlag pretenure) { | 2666 PretenureFlag pretenure) { |
2667 // Statically ensure that it is safe to allocate heap numbers in paged | 2667 // Statically ensure that it is safe to allocate heap numbers in paged |
2668 // spaces. | 2668 // spaces. |
2669 int size = HeapNumber::kSize; | 2669 int size = HeapNumber::kSize; |
(...skipping 108 matching lines...)
2778 Heap::CreateJSConstructEntryStub(); | 2778 Heap::CreateJSConstructEntryStub(); |
2779 } | 2779 } |
2780 | 2780 |
2781 | 2781 |
2782 void Heap::CreateInitialObjects() { | 2782 void Heap::CreateInitialObjects() { |
2783 HandleScope scope(isolate()); | 2783 HandleScope scope(isolate()); |
2784 Factory* factory = isolate()->factory(); | 2784 Factory* factory = isolate()->factory(); |
2785 | 2785 |
2786 // The -0 value must be set before NewNumber works. | 2786 // The -0 value must be set before NewNumber works. |
2787 set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED)); | 2787 set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED)); |
2788 ASSERT(std::signbit(minus_zero_value()->Number()) != 0); | 2788 DCHECK(std::signbit(minus_zero_value()->Number()) != 0); |
2789 | 2789 |
2790 set_nan_value( | 2790 set_nan_value( |
2791 *factory->NewHeapNumber(base::OS::nan_value(), IMMUTABLE, TENURED)); | 2791 *factory->NewHeapNumber(base::OS::nan_value(), IMMUTABLE, TENURED)); |
2792 set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED)); | 2792 set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED)); |
2793 | 2793 |
2794 // The hole has not been created yet, but we want to put something | 2794 // The hole has not been created yet, but we want to put something |
2795 // predictable in the gaps in the string table, so let's make that Smi zero. | 2795 // predictable in the gaps in the string table, so let's make that Smi zero. |
2796 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0))); | 2796 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0))); |
2797 | 2797 |
2798 // Allocate initial string table. | 2798 // Allocate initial string table. |
(...skipping 198 matching lines...)
2997 } | 2997 } |
2998 | 2998 |
2999 | 2999 |
3000 Object* RegExpResultsCache::Lookup(Heap* heap, | 3000 Object* RegExpResultsCache::Lookup(Heap* heap, |
3001 String* key_string, | 3001 String* key_string, |
3002 Object* key_pattern, | 3002 Object* key_pattern, |
3003 ResultsCacheType type) { | 3003 ResultsCacheType type) { |
3004 FixedArray* cache; | 3004 FixedArray* cache; |
3005 if (!key_string->IsInternalizedString()) return Smi::FromInt(0); | 3005 if (!key_string->IsInternalizedString()) return Smi::FromInt(0); |
3006 if (type == STRING_SPLIT_SUBSTRINGS) { | 3006 if (type == STRING_SPLIT_SUBSTRINGS) { |
3007 ASSERT(key_pattern->IsString()); | 3007 DCHECK(key_pattern->IsString()); |
3008 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0); | 3008 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0); |
3009 cache = heap->string_split_cache(); | 3009 cache = heap->string_split_cache(); |
3010 } else { | 3010 } else { |
3011 ASSERT(type == REGEXP_MULTIPLE_INDICES); | 3011 DCHECK(type == REGEXP_MULTIPLE_INDICES); |
3012 ASSERT(key_pattern->IsFixedArray()); | 3012 DCHECK(key_pattern->IsFixedArray()); |
3013 cache = heap->regexp_multiple_cache(); | 3013 cache = heap->regexp_multiple_cache(); |
3014 } | 3014 } |
3015 | 3015 |
3016 uint32_t hash = key_string->Hash(); | 3016 uint32_t hash = key_string->Hash(); |
3017 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) & | 3017 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) & |
3018 ~(kArrayEntriesPerCacheEntry - 1)); | 3018 ~(kArrayEntriesPerCacheEntry - 1)); |
3019 if (cache->get(index + kStringOffset) == key_string && | 3019 if (cache->get(index + kStringOffset) == key_string && |
3020 cache->get(index + kPatternOffset) == key_pattern) { | 3020 cache->get(index + kPatternOffset) == key_pattern) { |
3021 return cache->get(index + kArrayOffset); | 3021 return cache->get(index + kArrayOffset); |
3022 } | 3022 } |
3023 index = | 3023 index = |
3024 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1)); | 3024 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1)); |
3025 if (cache->get(index + kStringOffset) == key_string && | 3025 if (cache->get(index + kStringOffset) == key_string && |
3026 cache->get(index + kPatternOffset) == key_pattern) { | 3026 cache->get(index + kPatternOffset) == key_pattern) { |
3027 return cache->get(index + kArrayOffset); | 3027 return cache->get(index + kArrayOffset); |
3028 } | 3028 } |
3029 return Smi::FromInt(0); | 3029 return Smi::FromInt(0); |
3030 } | 3030 } |
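Lookup is a 2-way set-associative probe over a flat FixedArray. The index arithmetic, restated with illustrative constants (the real values live elsewhere in V8; both must be powers of two):

    #include <cstdint>

    constexpr uint32_t kRegExpResultsCacheSize = 0x100;  // illustrative
    constexpr uint32_t kArrayEntriesPerCacheEntry = 4;   // illustrative

    // First probe: mask the hash into the cache, then round down to an
    // entry boundary.
    uint32_t PrimaryIndex(uint32_t hash) {
      return (hash & (kRegExpResultsCacheSize - 1)) &
             ~(kArrayEntriesPerCacheEntry - 1);
    }

    // Second probe: the next entry, wrapping around the end of the cache.
    uint32_t SecondaryIndex(uint32_t primary) {
      return (primary + kArrayEntriesPerCacheEntry) &
             (kRegExpResultsCacheSize - 1);
    }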
3031 | 3031 |
3032 | 3032 |
3033 void RegExpResultsCache::Enter(Isolate* isolate, | 3033 void RegExpResultsCache::Enter(Isolate* isolate, |
3034 Handle<String> key_string, | 3034 Handle<String> key_string, |
3035 Handle<Object> key_pattern, | 3035 Handle<Object> key_pattern, |
3036 Handle<FixedArray> value_array, | 3036 Handle<FixedArray> value_array, |
3037 ResultsCacheType type) { | 3037 ResultsCacheType type) { |
3038 Factory* factory = isolate->factory(); | 3038 Factory* factory = isolate->factory(); |
3039 Handle<FixedArray> cache; | 3039 Handle<FixedArray> cache; |
3040 if (!key_string->IsInternalizedString()) return; | 3040 if (!key_string->IsInternalizedString()) return; |
3041 if (type == STRING_SPLIT_SUBSTRINGS) { | 3041 if (type == STRING_SPLIT_SUBSTRINGS) { |
3042 ASSERT(key_pattern->IsString()); | 3042 DCHECK(key_pattern->IsString()); |
3043 if (!key_pattern->IsInternalizedString()) return; | 3043 if (!key_pattern->IsInternalizedString()) return; |
3044 cache = factory->string_split_cache(); | 3044 cache = factory->string_split_cache(); |
3045 } else { | 3045 } else { |
3046 ASSERT(type == REGEXP_MULTIPLE_INDICES); | 3046 DCHECK(type == REGEXP_MULTIPLE_INDICES); |
3047 ASSERT(key_pattern->IsFixedArray()); | 3047 DCHECK(key_pattern->IsFixedArray()); |
3048 cache = factory->regexp_multiple_cache(); | 3048 cache = factory->regexp_multiple_cache(); |
3049 } | 3049 } |
3050 | 3050 |
3051 uint32_t hash = key_string->Hash(); | 3051 uint32_t hash = key_string->Hash(); |
3052 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) & | 3052 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) & |
3053 ~(kArrayEntriesPerCacheEntry - 1)); | 3053 ~(kArrayEntriesPerCacheEntry - 1)); |
3054 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) { | 3054 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) { |
3055 cache->set(index + kStringOffset, *key_string); | 3055 cache->set(index + kStringOffset, *key_string); |
3056 cache->set(index + kPatternOffset, *key_pattern); | 3056 cache->set(index + kPatternOffset, *key_pattern); |
3057 cache->set(index + kArrayOffset, *value_array); | 3057 cache->set(index + kArrayOffset, *value_array); |
(...skipping 58 matching lines...)
3116 | 3116 |
3117 void Heap::FlushAllocationSitesScratchpad() { | 3117 void Heap::FlushAllocationSitesScratchpad() { |
3118 for (int i = 0; i < allocation_sites_scratchpad_length_; i++) { | 3118 for (int i = 0; i < allocation_sites_scratchpad_length_; i++) { |
3119 allocation_sites_scratchpad()->set_undefined(i); | 3119 allocation_sites_scratchpad()->set_undefined(i); |
3120 } | 3120 } |
3121 allocation_sites_scratchpad_length_ = 0; | 3121 allocation_sites_scratchpad_length_ = 0; |
3122 } | 3122 } |
3123 | 3123 |
3124 | 3124 |
3125 void Heap::InitializeAllocationSitesScratchpad() { | 3125 void Heap::InitializeAllocationSitesScratchpad() { |
3126 ASSERT(allocation_sites_scratchpad()->length() == | 3126 DCHECK(allocation_sites_scratchpad()->length() == |
3127 kAllocationSiteScratchpadSize); | 3127 kAllocationSiteScratchpadSize); |
3128 for (int i = 0; i < kAllocationSiteScratchpadSize; i++) { | 3128 for (int i = 0; i < kAllocationSiteScratchpadSize; i++) { |
3129 allocation_sites_scratchpad()->set_undefined(i); | 3129 allocation_sites_scratchpad()->set_undefined(i); |
3130 } | 3130 } |
3131 } | 3131 } |
3132 | 3132 |
3133 | 3133 |
3134 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site, | 3134 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site, |
3135 ScratchpadSlotMode mode) { | 3135 ScratchpadSlotMode mode) { |
3136 if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) { | 3136 if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) { |
(...skipping 246 matching lines...)
3383 | 3383 |
3384 object->set_map(MapForFixedTypedArray(array_type)); | 3384 object->set_map(MapForFixedTypedArray(array_type)); |
3385 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object); | 3385 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object); |
3386 elements->set_length(length); | 3386 elements->set_length(length); |
3387 memset(elements->DataPtr(), 0, elements->DataSize()); | 3387 memset(elements->DataPtr(), 0, elements->DataSize()); |
3388 return elements; | 3388 return elements; |
3389 } | 3389 } |
3390 | 3390 |
3391 | 3391 |
3392 AllocationResult Heap::AllocateCode(int object_size, bool immovable) { | 3392 AllocationResult Heap::AllocateCode(int object_size, bool immovable) { |
3393 ASSERT(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment)); | 3393 DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment)); |
3394 AllocationResult allocation = | 3394 AllocationResult allocation = |
3395 AllocateRaw(object_size, CODE_SPACE, CODE_SPACE); | 3395 AllocateRaw(object_size, CODE_SPACE, CODE_SPACE); |
3396 | 3396 |
3397 HeapObject* result; | 3397 HeapObject* result; |
3398 if (!allocation.To(&result)) return allocation; | 3398 if (!allocation.To(&result)) return allocation; |
3399 | 3399 |
3400 if (immovable) { | 3400 if (immovable) { |
3401 Address address = result->address(); | 3401 Address address = result->address(); |
3402 // Code objects which should stay at a fixed address are allocated either | 3402 // Code objects which should stay at a fixed address are allocated either |
3403 // in the first page of code space (objects on the first page of each space | 3403 // in the first page of code space (objects on the first page of each space |
3404 // are never moved) or in large object space. | 3404 // are never moved) or in large object space. |
3405 if (!code_space_->FirstPage()->Contains(address) && | 3405 if (!code_space_->FirstPage()->Contains(address) && |
3406 MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) { | 3406 MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) { |
3407 // Discard the first code allocation, which was on a page where it could | 3407 // Discard the first code allocation, which was on a page where it could |
3408 // be moved. | 3408 // be moved. |
3409 CreateFillerObjectAt(result->address(), object_size); | 3409 CreateFillerObjectAt(result->address(), object_size); |
3410 allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE); | 3410 allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE); |
3411 if (!allocation.To(&result)) return allocation; | 3411 if (!allocation.To(&result)) return allocation; |
3412 OnAllocationEvent(result, object_size); | 3412 OnAllocationEvent(result, object_size); |
3413 } | 3413 } |
3414 } | 3414 } |
3415 | 3415 |
3416 result->set_map_no_write_barrier(code_map()); | 3416 result->set_map_no_write_barrier(code_map()); |
3417 Code* code = Code::cast(result); | 3417 Code* code = Code::cast(result); |
3418 ASSERT(isolate_->code_range() == NULL || | 3418 DCHECK(isolate_->code_range() == NULL || |
3419 !isolate_->code_range()->valid() || | 3419 !isolate_->code_range()->valid() || |
3420 isolate_->code_range()->contains(code->address())); | 3420 isolate_->code_range()->contains(code->address())); |
3421 code->set_gc_metadata(Smi::FromInt(0)); | 3421 code->set_gc_metadata(Smi::FromInt(0)); |
3422 code->set_ic_age(global_ic_age_); | 3422 code->set_ic_age(global_ic_age_); |
3423 return code; | 3423 return code; |
3424 } | 3424 } |
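The immovable path relies on two facts stated in the comment above: objects on the first page of a space never move, and neither do large objects. A stand-in sketch of the retry policy (the allocator hooks are fakes, not the V8 API):

    #include <cstdlib>
    #include <optional>

    struct Alloc { void* address; bool immovable; };

    // Fake allocators standing in for code space and large object space.
    std::optional<Alloc> AllocateInCodeSpace(int size) {
      // Pretend the allocation landed past the first page, i.e. movable.
      return Alloc{std::malloc(size), /*immovable=*/false};
    }
    std::optional<Alloc> AllocateInLargeObjectSpace(int size) {
      return Alloc{std::malloc(size), /*immovable=*/true};
    }
    void MakeFiller(void*, int) {}  // stand-in for CreateFillerObjectAt

    std::optional<Alloc> AllocateImmovableCode(int size) {
      std::optional<Alloc> a = AllocateInCodeSpace(size);
      if (!a) return a;
      if (!a->immovable) {
        MakeFiller(a->address, size);          // discard the movable attempt
        a = AllocateInLargeObjectSpace(size);  // large objects never move
      }
      return a;
    }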
3425 | 3425 |
3426 | 3426 |
3427 AllocationResult Heap::CopyCode(Code* code) { | 3427 AllocationResult Heap::CopyCode(Code* code) { |
3428 AllocationResult allocation; | 3428 AllocationResult allocation; |
(...skipping 17 matching lines...) Expand all Loading... |
3446 // Copy code object. | 3446 // Copy code object. |
3447 Address old_addr = code->address(); | 3447 Address old_addr = code->address(); |
3448 Address new_addr = result->address(); | 3448 Address new_addr = result->address(); |
3449 CopyBlock(new_addr, old_addr, obj_size); | 3449 CopyBlock(new_addr, old_addr, obj_size); |
3450 Code* new_code = Code::cast(result); | 3450 Code* new_code = Code::cast(result); |
3451 | 3451 |
3452 // Update the constant pool. | 3452 // Update the constant pool. |
3453 new_code->set_constant_pool(new_constant_pool); | 3453 new_code->set_constant_pool(new_constant_pool); |
3454 | 3454 |
3455 // Relocate the copy. | 3455 // Relocate the copy. |
3456 ASSERT(isolate_->code_range() == NULL || | 3456 DCHECK(isolate_->code_range() == NULL || |
3457 !isolate_->code_range()->valid() || | 3457 !isolate_->code_range()->valid() || |
3458 isolate_->code_range()->contains(code->address())); | 3458 isolate_->code_range()->contains(code->address())); |
3459 new_code->Relocate(new_addr - old_addr); | 3459 new_code->Relocate(new_addr - old_addr); |
3460 return new_code; | 3460 return new_code; |
3461 } | 3461 } |
3462 | 3462 |
3463 | 3463 |
3464 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) { | 3464 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) { |
3465 // Allocate ByteArray and ConstantPoolArray before the Code object, so that we | 3465 // Allocate ByteArray and ConstantPoolArray before the Code object, so that we |
3466 // do not risk leaving an uninitialized Code object (and breaking the heap). | 3466 // do not risk leaving an uninitialized Code object (and breaking the heap). |
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3506 | 3506 |
3507 // Update constant pool. | 3507 // Update constant pool. |
3508 new_code->set_constant_pool(new_constant_pool); | 3508 new_code->set_constant_pool(new_constant_pool); |
3509 | 3509 |
3510 // Copy patched rinfo. | 3510 // Copy patched rinfo. |
3511 CopyBytes(new_code->relocation_start(), | 3511 CopyBytes(new_code->relocation_start(), |
3512 reloc_info.start(), | 3512 reloc_info.start(), |
3513 static_cast<size_t>(reloc_info.length())); | 3513 static_cast<size_t>(reloc_info.length())); |
3514 | 3514 |
3515 // Relocate the copy. | 3515 // Relocate the copy. |
3516 ASSERT(isolate_->code_range() == NULL || | 3516 DCHECK(isolate_->code_range() == NULL || |
3517 !isolate_->code_range()->valid() || | 3517 !isolate_->code_range()->valid() || |
3518 isolate_->code_range()->contains(code->address())); | 3518 isolate_->code_range()->contains(code->address())); |
3519 new_code->Relocate(new_addr - old_addr); | 3519 new_code->Relocate(new_addr - old_addr); |
3520 | 3520 |
3521 #ifdef VERIFY_HEAP | 3521 #ifdef VERIFY_HEAP |
3522 if (FLAG_verify_heap) code->ObjectVerify(); | 3522 if (FLAG_verify_heap) code->ObjectVerify(); |
3523 #endif | 3523 #endif |
3524 return new_code; | 3524 return new_code; |
3525 } | 3525 } |
3526 | 3526 |
3527 | 3527 |
3528 void Heap::InitializeAllocationMemento(AllocationMemento* memento, | 3528 void Heap::InitializeAllocationMemento(AllocationMemento* memento, |
3529 AllocationSite* allocation_site) { | 3529 AllocationSite* allocation_site) { |
3530 memento->set_map_no_write_barrier(allocation_memento_map()); | 3530 memento->set_map_no_write_barrier(allocation_memento_map()); |
3531 ASSERT(allocation_site->map() == allocation_site_map()); | 3531 DCHECK(allocation_site->map() == allocation_site_map()); |
3532 memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER); | 3532 memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER); |
3533 if (FLAG_allocation_site_pretenuring) { | 3533 if (FLAG_allocation_site_pretenuring) { |
3534 allocation_site->IncrementMementoCreateCount(); | 3534 allocation_site->IncrementMementoCreateCount(); |
3535 } | 3535 } |
3536 } | 3536 } |
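
A memento is not a separate allocation: Allocate() below simply adds AllocationMemento::kSize to the requested size when a site is supplied, and the memento struct is written directly behind the object, so finding it later is pure address arithmetic. A toy sketch of that layout, with an illustrative instance size:

#include <cassert>
#include <cstddef>

struct Memento { const void* site; };  // stand-in for AllocationMemento

int main() {
  const std::size_t object_size = 24;  // illustrative, not a real map size
  alignas(void*) unsigned char raw[object_size + sizeof(Memento)];

  // Write the memento directly behind the object, as Allocate() lays it out.
  int dummy_site = 0;  // stand-in for an AllocationSite
  Memento* memento = reinterpret_cast<Memento*>(raw + object_size);
  memento->site = &dummy_site;

  // Later, the memento is recovered from the object's address alone.
  Memento* found = reinterpret_cast<Memento*>(raw + object_size);
  assert(found->site == &dummy_site);
  return 0;
}
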
3537 | 3537 |
3538 | 3538 |
3539 AllocationResult Heap::Allocate(Map* map, AllocationSpace space, | 3539 AllocationResult Heap::Allocate(Map* map, AllocationSpace space, |
3540 AllocationSite* allocation_site) { | 3540 AllocationSite* allocation_site) { |
3541 ASSERT(gc_state_ == NOT_IN_GC); | 3541 DCHECK(gc_state_ == NOT_IN_GC); |
3542 ASSERT(map->instance_type() != MAP_TYPE); | 3542 DCHECK(map->instance_type() != MAP_TYPE); |
3543 // If allocation failures are disallowed, we may allocate in a different | 3543 // If allocation failures are disallowed, we may allocate in a different |
3544 // space when new space is full and the object is not a large object. | 3544 // space when new space is full and the object is not a large object. |
3545 AllocationSpace retry_space = | 3545 AllocationSpace retry_space = |
3546 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type()); | 3546 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type()); |
3547 int size = map->instance_size(); | 3547 int size = map->instance_size(); |
3548 if (allocation_site != NULL) { | 3548 if (allocation_site != NULL) { |
3549 size += AllocationMemento::kSize; | 3549 size += AllocationMemento::kSize; |
3550 } | 3550 } |
3551 HeapObject* result; | 3551 HeapObject* result; |
3552 AllocationResult allocation = AllocateRaw(size, space, retry_space); | 3552 AllocationResult allocation = AllocateRaw(size, space, retry_space); |
(...skipping 25 matching lines...)
3578 // We cannot always fill with one_pointer_filler_map because objects | 3578 // We cannot always fill with one_pointer_filler_map because objects |
3579 // created from API functions expect their internal fields to be initialized | 3579 // created from API functions expect their internal fields to be initialized |
3580 // with undefined_value. | 3580 // with undefined_value. |
3581 // Pre-allocated fields need to be initialized with undefined_value as well | 3581 // Pre-allocated fields need to be initialized with undefined_value as well |
3582 // so that object accesses before the constructor completes (e.g. in the | 3582 // so that object accesses before the constructor completes (e.g. in the |
3583 // debugger) will not cause a crash. | 3583 // debugger) will not cause a crash. |
3584 if (map->constructor()->IsJSFunction() && | 3584 if (map->constructor()->IsJSFunction() && |
3585 JSFunction::cast(map->constructor())-> | 3585 JSFunction::cast(map->constructor())-> |
3586 IsInobjectSlackTrackingInProgress()) { | 3586 IsInobjectSlackTrackingInProgress()) { |
3587 // We might want to shrink the object later. | 3587 // We might want to shrink the object later. |
3588 ASSERT(obj->GetInternalFieldCount() == 0); | 3588 DCHECK(obj->GetInternalFieldCount() == 0); |
3589 filler = Heap::one_pointer_filler_map(); | 3589 filler = Heap::one_pointer_filler_map(); |
3590 } else { | 3590 } else { |
3591 filler = Heap::undefined_value(); | 3591 filler = Heap::undefined_value(); |
3592 } | 3592 } |
3593 obj->InitializeBody(map, Heap::undefined_value(), filler); | 3593 obj->InitializeBody(map, Heap::undefined_value(), filler); |
3594 } | 3594 } |
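
In other words: while the constructor is still being profiled, unused tail slots carry a recognizable filler so the map can later be shrunk by however many trailing filler slots remain; once tracking ends, plain undefined is used. The DCHECK above insists on zero internal fields precisely because API-created objects must see undefined_value, never the filler. A toy sketch of the shrink-by-counting-trailing-filler idea (the sentinel values are illustrative only):

#include <array>
#include <cassert>

enum Slot { kUndefined, kFiller, kValue };  // toy field states

int main() {
  std::array<Slot, 8> fields;
  fields.fill(kFiller);  // tail sentinel while slack tracking is in progress

  for (int i = 0; i < 5; ++i) fields[i] = kValue;  // constructor writes 5

  // Slack = number of trailing filler slots; these could be reclaimed when
  // the map is shrunk at the end of tracking.
  int slack = 0;
  for (int i = static_cast<int>(fields.size()) - 1;
       i >= 0 && fields[i] == kFiller; --i) {
    ++slack;
  }
  assert(slack == 3);
  return 0;
}
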
3595 | 3595 |
3596 | 3596 |
3597 AllocationResult Heap::AllocateJSObjectFromMap( | 3597 AllocationResult Heap::AllocateJSObjectFromMap( |
3598 Map* map, | 3598 Map* map, |
3599 PretenureFlag pretenure, | 3599 PretenureFlag pretenure, |
3600 bool allocate_properties, | 3600 bool allocate_properties, |
3601 AllocationSite* allocation_site) { | 3601 AllocationSite* allocation_site) { |
3602 // JSFunctions should be allocated using AllocateFunction to be | 3602 // JSFunctions should be allocated using AllocateFunction to be |
3603 // properly initialized. | 3603 // properly initialized. |
3604 ASSERT(map->instance_type() != JS_FUNCTION_TYPE); | 3604 DCHECK(map->instance_type() != JS_FUNCTION_TYPE); |
3605 | 3605 |
3606 // Both types of global objects should be allocated using | 3606 // Both types of global objects should be allocated using |
3607 // AllocateGlobalObject to be properly initialized. | 3607 // AllocateGlobalObject to be properly initialized. |
3608 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE); | 3608 DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE); |
3609 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE); | 3609 DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE); |
3610 | 3610 |
3611 // Allocate the backing storage for the properties. | 3611 // Allocate the backing storage for the properties. |
3612 FixedArray* properties; | 3612 FixedArray* properties; |
3613 if (allocate_properties) { | 3613 if (allocate_properties) { |
3614 int prop_size = map->InitialPropertiesLength(); | 3614 int prop_size = map->InitialPropertiesLength(); |
3615 ASSERT(prop_size >= 0); | 3615 DCHECK(prop_size >= 0); |
3616 { AllocationResult allocation = AllocateFixedArray(prop_size, pretenure); | 3616 { AllocationResult allocation = AllocateFixedArray(prop_size, pretenure); |
3617 if (!allocation.To(&properties)) return allocation; | 3617 if (!allocation.To(&properties)) return allocation; |
3618 } | 3618 } |
3619 } else { | 3619 } else { |
3620 properties = empty_fixed_array(); | 3620 properties = empty_fixed_array(); |
3621 } | 3621 } |
3622 | 3622 |
3623 // Allocate the JSObject. | 3623 // Allocate the JSObject. |
3624 int size = map->instance_size(); | 3624 int size = map->instance_size(); |
3625 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure); | 3625 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure); |
3626 JSObject* js_obj; | 3626 JSObject* js_obj; |
3627 AllocationResult allocation = Allocate(map, space, allocation_site); | 3627 AllocationResult allocation = Allocate(map, space, allocation_site); |
3628 if (!allocation.To(&js_obj)) return allocation; | 3628 if (!allocation.To(&js_obj)) return allocation; |
3629 | 3629 |
3630 // Initialize the JSObject. | 3630 // Initialize the JSObject. |
3631 InitializeJSObjectFromMap(js_obj, properties, map); | 3631 InitializeJSObjectFromMap(js_obj, properties, map); |
3632 ASSERT(js_obj->HasFastElements() || | 3632 DCHECK(js_obj->HasFastElements() || |
3633 js_obj->HasExternalArrayElements() || | 3633 js_obj->HasExternalArrayElements() || |
3634 js_obj->HasFixedTypedArrayElements()); | 3634 js_obj->HasFixedTypedArrayElements()); |
3635 return js_obj; | 3635 return js_obj; |
3636 } | 3636 } |
3637 | 3637 |
3638 | 3638 |
3639 AllocationResult Heap::AllocateJSObject(JSFunction* constructor, | 3639 AllocationResult Heap::AllocateJSObject(JSFunction* constructor, |
3640 PretenureFlag pretenure, | 3640 PretenureFlag pretenure, |
3641 AllocationSite* allocation_site) { | 3641 AllocationSite* allocation_site) { |
3642 ASSERT(constructor->has_initial_map()); | 3642 DCHECK(constructor->has_initial_map()); |
3643 | 3643 |
3644 // Allocate the object based on the constructor's initial map. | 3644 // Allocate the object based on the constructor's initial map. |
3645 AllocationResult allocation = AllocateJSObjectFromMap( | 3645 AllocationResult allocation = AllocateJSObjectFromMap( |
3646 constructor->initial_map(), pretenure, true, allocation_site); | 3646 constructor->initial_map(), pretenure, true, allocation_site); |
3647 #ifdef DEBUG | 3647 #ifdef DEBUG |
3648 // Make sure result is NOT a global object if valid. | 3648 // Make sure result is NOT a global object if valid. |
3649 HeapObject* obj; | 3649 HeapObject* obj; |
3650 ASSERT(!allocation.To(&obj) || !obj->IsGlobalObject()); | 3650 DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject()); |
3651 #endif | 3651 #endif |
3652 return allocation; | 3652 return allocation; |
3653 } | 3653 } |
3654 | 3654 |
3655 | 3655 |
3656 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) { | 3656 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) { |
3657 // Never used to copy functions. If functions need to be copied we | 3657 // Never used to copy functions. If functions need to be copied we |
3658 // have to be careful to clear the literals array. | 3658 // have to be careful to clear the literals array. |
3659 SLOW_ASSERT(!source->IsJSFunction()); | 3659 SLOW_DCHECK(!source->IsJSFunction()); |
3660 | 3660 |
3661 // Make the clone. | 3661 // Make the clone. |
3662 Map* map = source->map(); | 3662 Map* map = source->map(); |
3663 int object_size = map->instance_size(); | 3663 int object_size = map->instance_size(); |
3664 HeapObject* clone; | 3664 HeapObject* clone; |
3665 | 3665 |
3666 ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type())); | 3666 DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type())); |
3667 | 3667 |
3668 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER; | 3668 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER; |
3669 | 3669 |
3670 // If we're forced to always allocate, we use the general allocation | 3670 // If we're forced to always allocate, we use the general allocation |
3671 // functions which may leave us with an object in old space. | 3671 // functions which may leave us with an object in old space. |
3672 if (always_allocate()) { | 3672 if (always_allocate()) { |
3673 { AllocationResult allocation = | 3673 { AllocationResult allocation = |
3674 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE); | 3674 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE); |
3675 if (!allocation.To(&clone)) return allocation; | 3675 if (!allocation.To(&clone)) return allocation; |
3676 } | 3676 } |
3677 Address clone_address = clone->address(); | 3677 Address clone_address = clone->address(); |
3678 CopyBlock(clone_address, | 3678 CopyBlock(clone_address, |
3679 source->address(), | 3679 source->address(), |
3680 object_size); | 3680 object_size); |
3681 // Update write barrier for all fields that lie beyond the header. | 3681 // Update write barrier for all fields that lie beyond the header. |
3682 RecordWrites(clone_address, | 3682 RecordWrites(clone_address, |
3683 JSObject::kHeaderSize, | 3683 JSObject::kHeaderSize, |
3684 (object_size - JSObject::kHeaderSize) / kPointerSize); | 3684 (object_size - JSObject::kHeaderSize) / kPointerSize); |
3685 } else { | 3685 } else { |
3686 wb_mode = SKIP_WRITE_BARRIER; | 3686 wb_mode = SKIP_WRITE_BARRIER; |
3687 | 3687 |
3688 { int adjusted_object_size = site != NULL | 3688 { int adjusted_object_size = site != NULL |
3689 ? object_size + AllocationMemento::kSize | 3689 ? object_size + AllocationMemento::kSize |
3690 : object_size; | 3690 : object_size; |
3691 AllocationResult allocation = | 3691 AllocationResult allocation = |
3692 AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE); | 3692 AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE); |
3693 if (!allocation.To(&clone)) return allocation; | 3693 if (!allocation.To(&clone)) return allocation; |
3694 } | 3694 } |
3695 SLOW_ASSERT(InNewSpace(clone)); | 3695 SLOW_DCHECK(InNewSpace(clone)); |
3696 // Since we know the clone is allocated in new space, we can copy | 3696 // Since we know the clone is allocated in new space, we can copy |
3697 // the contents without worrying about updating the write barrier. | 3697 // the contents without worrying about updating the write barrier. |
3698 CopyBlock(clone->address(), | 3698 CopyBlock(clone->address(), |
3699 source->address(), | 3699 source->address(), |
3700 object_size); | 3700 object_size); |
3701 | 3701 |
3702 if (site != NULL) { | 3702 if (site != NULL) { |
3703 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>( | 3703 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>( |
3704 reinterpret_cast<Address>(clone) + object_size); | 3704 reinterpret_cast<Address>(clone) + object_size); |
3705 InitializeAllocationMemento(alloc_memento, site); | 3705 InitializeAllocationMemento(alloc_memento, site); |
3706 } | 3706 } |
3707 } | 3707 } |
3708 | 3708 |
3709 SLOW_ASSERT( | 3709 SLOW_DCHECK( |
3710 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind()); | 3710 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind()); |
3711 FixedArrayBase* elements = FixedArrayBase::cast(source->elements()); | 3711 FixedArrayBase* elements = FixedArrayBase::cast(source->elements()); |
3712 FixedArray* properties = FixedArray::cast(source->properties()); | 3712 FixedArray* properties = FixedArray::cast(source->properties()); |
3713 // Update elements if necessary. | 3713 // Update elements if necessary. |
3714 if (elements->length() > 0) { | 3714 if (elements->length() > 0) { |
3715 FixedArrayBase* elem; | 3715 FixedArrayBase* elem; |
3716 { AllocationResult allocation; | 3716 { AllocationResult allocation; |
3717 if (elements->map() == fixed_cow_array_map()) { | 3717 if (elements->map() == fixed_cow_array_map()) { |
3718 allocation = FixedArray::cast(elements); | 3718 allocation = FixedArray::cast(elements); |
3719 } else if (source->HasFastDoubleElements()) { | 3719 } else if (source->HasFastDoubleElements()) { |
(...skipping 15 matching lines...)
3735 } | 3735 } |
3736 // Return the new clone. | 3736 // Return the new clone. |
3737 return clone; | 3737 return clone; |
3738 } | 3738 } |
3739 | 3739 |
3740 | 3740 |
3741 static inline void WriteOneByteData(Vector<const char> vector, | 3741 static inline void WriteOneByteData(Vector<const char> vector, |
3742 uint8_t* chars, | 3742 uint8_t* chars, |
3743 int len) { | 3743 int len) { |
3744 // Only works for ASCII. | 3744 // Only works for ASCII. |
3745 ASSERT(vector.length() == len); | 3745 DCHECK(vector.length() == len); |
3746 MemCopy(chars, vector.start(), len); | 3746 MemCopy(chars, vector.start(), len); |
3747 } | 3747 } |
3748 | 3748 |
3749 static inline void WriteTwoByteData(Vector<const char> vector, | 3749 static inline void WriteTwoByteData(Vector<const char> vector, |
3750 uint16_t* chars, | 3750 uint16_t* chars, |
3751 int len) { | 3751 int len) { |
3752 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start()); | 3752 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start()); |
3753 unsigned stream_length = vector.length(); | 3753 unsigned stream_length = vector.length(); |
3754 while (stream_length != 0) { | 3754 while (stream_length != 0) { |
3755 unsigned consumed = 0; | 3755 unsigned consumed = 0; |
3756 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed); | 3756 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed); |
3757 ASSERT(c != unibrow::Utf8::kBadChar); | 3757 DCHECK(c != unibrow::Utf8::kBadChar); |
3758 ASSERT(consumed <= stream_length); | 3758 DCHECK(consumed <= stream_length); |
3759 stream_length -= consumed; | 3759 stream_length -= consumed; |
3760 stream += consumed; | 3760 stream += consumed; |
3761 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) { | 3761 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) { |
3762 len -= 2; | 3762 len -= 2; |
3763 if (len < 0) break; | 3763 if (len < 0) break; |
3764 *chars++ = unibrow::Utf16::LeadSurrogate(c); | 3764 *chars++ = unibrow::Utf16::LeadSurrogate(c); |
3765 *chars++ = unibrow::Utf16::TrailSurrogate(c); | 3765 *chars++ = unibrow::Utf16::TrailSurrogate(c); |
3766 } else { | 3766 } else { |
3767 len -= 1; | 3767 len -= 1; |
3768 if (len < 0) break; | 3768 if (len < 0) break; |
3769 *chars++ = c; | 3769 *chars++ = c; |
3770 } | 3770 } |
3771 } | 3771 } |
3772 ASSERT(stream_length == 0); | 3772 DCHECK(stream_length == 0); |
3773 ASSERT(len == 0); | 3773 DCHECK(len == 0); |
3774 } | 3774 } |
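
The surrogate branch above is plain UTF-16 arithmetic: a code point beyond the BMP loses 0x10000 and splits into a 10-bit lead half based at 0xD800 and a 10-bit trail half based at 0xDC00. A self-contained sketch of the transformation that unibrow::Utf16::LeadSurrogate and TrailSurrogate perform:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t c = 0x1F600;  // example code point outside the BMP
  assert(c > 0xFFFF);    // i.e. above kMaxNonSurrogateCharCode

  uint32_t v = c - 0x10000;  // 20 payload bits
  uint16_t lead = static_cast<uint16_t>(0xD800 + (v >> 10));     // high 10
  uint16_t trail = static_cast<uint16_t>(0xDC00 + (v & 0x3FF));  // low 10
  assert(lead == 0xD83D && trail == 0xDE00);  // UTF-16 for U+1F600

  // Decoding reverses the split.
  uint32_t decoded = 0x10000 + ((lead - 0xD800) << 10) + (trail - 0xDC00);
  assert(decoded == c);
  return 0;
}
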
3775 | 3775 |
3776 | 3776 |
3777 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) { | 3777 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) { |
3778 ASSERT(s->length() == len); | 3778 DCHECK(s->length() == len); |
3779 String::WriteToFlat(s, chars, 0, len); | 3779 String::WriteToFlat(s, chars, 0, len); |
3780 } | 3780 } |
3781 | 3781 |
3782 | 3782 |
3783 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) { | 3783 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) { |
3784 ASSERT(s->length() == len); | 3784 DCHECK(s->length() == len); |
3785 String::WriteToFlat(s, chars, 0, len); | 3785 String::WriteToFlat(s, chars, 0, len); |
3786 } | 3786 } |
3787 | 3787 |
3788 | 3788 |
3789 template<bool is_one_byte, typename T> | 3789 template<bool is_one_byte, typename T> |
3790 AllocationResult Heap::AllocateInternalizedStringImpl( | 3790 AllocationResult Heap::AllocateInternalizedStringImpl( |
3791 T t, int chars, uint32_t hash_field) { | 3791 T t, int chars, uint32_t hash_field) { |
3792 ASSERT(chars >= 0); | 3792 DCHECK(chars >= 0); |
3793 // Compute map and object size. | 3793 // Compute map and object size. |
3794 int size; | 3794 int size; |
3795 Map* map; | 3795 Map* map; |
3796 | 3796 |
3797 ASSERT_LE(0, chars); | 3797 DCHECK_LE(0, chars); |
3798 ASSERT_GE(String::kMaxLength, chars); | 3798 DCHECK_GE(String::kMaxLength, chars); |
3799 if (is_one_byte) { | 3799 if (is_one_byte) { |
3800 map = ascii_internalized_string_map(); | 3800 map = ascii_internalized_string_map(); |
3801 size = SeqOneByteString::SizeFor(chars); | 3801 size = SeqOneByteString::SizeFor(chars); |
3802 } else { | 3802 } else { |
3803 map = internalized_string_map(); | 3803 map = internalized_string_map(); |
3804 size = SeqTwoByteString::SizeFor(chars); | 3804 size = SeqTwoByteString::SizeFor(chars); |
3805 } | 3805 } |
3806 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED); | 3806 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED); |
3807 | 3807 |
3808 // Allocate string. | 3808 // Allocate string. |
3809 HeapObject* result; | 3809 HeapObject* result; |
3810 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); | 3810 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); |
3811 if (!allocation.To(&result)) return allocation; | 3811 if (!allocation.To(&result)) return allocation; |
3812 } | 3812 } |
3813 | 3813 |
3814 result->set_map_no_write_barrier(map); | 3814 result->set_map_no_write_barrier(map); |
3815 // Set length and hash fields of the allocated string. | 3815 // Set length and hash fields of the allocated string. |
3816 String* answer = String::cast(result); | 3816 String* answer = String::cast(result); |
3817 answer->set_length(chars); | 3817 answer->set_length(chars); |
3818 answer->set_hash_field(hash_field); | 3818 answer->set_hash_field(hash_field); |
3819 | 3819 |
3820 ASSERT_EQ(size, answer->Size()); | 3820 DCHECK_EQ(size, answer->Size()); |
3821 | 3821 |
3822 if (is_one_byte) { | 3822 if (is_one_byte) { |
3823 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars); | 3823 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars); |
3824 } else { | 3824 } else { |
3825 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars); | 3825 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars); |
3826 } | 3826 } |
3827 return answer; | 3827 return answer; |
3828 } | 3828 } |
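
The size computation above is the usual header-plus-payload formula: SizeFor adds one byte per character (two for the two-byte map) to a fixed header and rounds up to pointer alignment so the heap stays iterable. A sketch under assumed layout constants (kHeaderSize here is illustrative, not V8's real SeqString header size):

#include <cassert>
#include <cstddef>

const std::size_t kPointerSize = sizeof(void*);
const std::size_t kHeaderSize = 2 * kPointerSize;  // assumption

std::size_t RoundUp(std::size_t n, std::size_t align) {
  return (n + align - 1) & ~(align - 1);
}
std::size_t SizeForOneByte(std::size_t chars) {  // 1 byte per character
  return RoundUp(kHeaderSize + chars, kPointerSize);
}
std::size_t SizeForTwoByte(std::size_t chars) {  // 2 bytes per character
  return RoundUp(kHeaderSize + 2 * chars, kPointerSize);
}

int main() {
  assert(SizeForOneByte(0) == kHeaderSize);        // empty string: header only
  assert(SizeForOneByte(5) % kPointerSize == 0);   // allocation stays aligned
  assert(SizeForTwoByte(5) >= SizeForOneByte(5));  // two-byte costs more
  return 0;
}
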
3829 | 3829 |
3830 | 3830 |
3831 // Need explicit instantiations. | 3831 // Need explicit instantiations. |
3832 template | 3832 template |
3833 AllocationResult Heap::AllocateInternalizedStringImpl<true>( | 3833 AllocationResult Heap::AllocateInternalizedStringImpl<true>( |
3834 String*, int, uint32_t); | 3834 String*, int, uint32_t); |
3835 template | 3835 template |
3836 AllocationResult Heap::AllocateInternalizedStringImpl<false>( | 3836 AllocationResult Heap::AllocateInternalizedStringImpl<false>( |
3837 String*, int, uint32_t); | 3837 String*, int, uint32_t); |
3838 template | 3838 template |
3839 AllocationResult Heap::AllocateInternalizedStringImpl<false>( | 3839 AllocationResult Heap::AllocateInternalizedStringImpl<false>( |
3840 Vector<const char>, int, uint32_t); | 3840 Vector<const char>, int, uint32_t); |
3841 | 3841 |
3842 | 3842 |
3843 AllocationResult Heap::AllocateRawOneByteString(int length, | 3843 AllocationResult Heap::AllocateRawOneByteString(int length, |
3844 PretenureFlag pretenure) { | 3844 PretenureFlag pretenure) { |
3845 ASSERT_LE(0, length); | 3845 DCHECK_LE(0, length); |
3846 ASSERT_GE(String::kMaxLength, length); | 3846 DCHECK_GE(String::kMaxLength, length); |
3847 int size = SeqOneByteString::SizeFor(length); | 3847 int size = SeqOneByteString::SizeFor(length); |
3848 ASSERT(size <= SeqOneByteString::kMaxSize); | 3848 DCHECK(size <= SeqOneByteString::kMaxSize); |
3849 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); | 3849 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); |
3850 | 3850 |
3851 HeapObject* result; | 3851 HeapObject* result; |
3852 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); | 3852 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); |
3853 if (!allocation.To(&result)) return allocation; | 3853 if (!allocation.To(&result)) return allocation; |
3854 } | 3854 } |
3855 | 3855 |
3856 // Partially initialize the object. | 3856 // Partially initialize the object. |
3857 result->set_map_no_write_barrier(ascii_string_map()); | 3857 result->set_map_no_write_barrier(ascii_string_map()); |
3858 String::cast(result)->set_length(length); | 3858 String::cast(result)->set_length(length); |
3859 String::cast(result)->set_hash_field(String::kEmptyHashField); | 3859 String::cast(result)->set_hash_field(String::kEmptyHashField); |
3860 ASSERT_EQ(size, HeapObject::cast(result)->Size()); | 3860 DCHECK_EQ(size, HeapObject::cast(result)->Size()); |
3861 | 3861 |
3862 return result; | 3862 return result; |
3863 } | 3863 } |
3864 | 3864 |
3865 | 3865 |
3866 AllocationResult Heap::AllocateRawTwoByteString(int length, | 3866 AllocationResult Heap::AllocateRawTwoByteString(int length, |
3867 PretenureFlag pretenure) { | 3867 PretenureFlag pretenure) { |
3868 ASSERT_LE(0, length); | 3868 DCHECK_LE(0, length); |
3869 ASSERT_GE(String::kMaxLength, length); | 3869 DCHECK_GE(String::kMaxLength, length); |
3870 int size = SeqTwoByteString::SizeFor(length); | 3870 int size = SeqTwoByteString::SizeFor(length); |
3871 ASSERT(size <= SeqTwoByteString::kMaxSize); | 3871 DCHECK(size <= SeqTwoByteString::kMaxSize); |
3872 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); | 3872 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); |
3873 | 3873 |
3874 HeapObject* result; | 3874 HeapObject* result; |
3875 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); | 3875 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); |
3876 if (!allocation.To(&result)) return allocation; | 3876 if (!allocation.To(&result)) return allocation; |
3877 } | 3877 } |
3878 | 3878 |
3879 // Partially initialize the object. | 3879 // Partially initialize the object. |
3880 result->set_map_no_write_barrier(string_map()); | 3880 result->set_map_no_write_barrier(string_map()); |
3881 String::cast(result)->set_length(length); | 3881 String::cast(result)->set_length(length); |
3882 String::cast(result)->set_hash_field(String::kEmptyHashField); | 3882 String::cast(result)->set_hash_field(String::kEmptyHashField); |
3883 ASSERT_EQ(size, HeapObject::cast(result)->Size()); | 3883 DCHECK_EQ(size, HeapObject::cast(result)->Size()); |
3884 return result; | 3884 return result; |
3885 } | 3885 } |
3886 | 3886 |
3887 | 3887 |
3888 AllocationResult Heap::AllocateEmptyFixedArray() { | 3888 AllocationResult Heap::AllocateEmptyFixedArray() { |
3889 int size = FixedArray::SizeFor(0); | 3889 int size = FixedArray::SizeFor(0); |
3890 HeapObject* result; | 3890 HeapObject* result; |
3891 { AllocationResult allocation = | 3891 { AllocationResult allocation = |
3892 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE); | 3892 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE); |
3893 if (!allocation.To(&result)) return allocation; | 3893 if (!allocation.To(&result)) return allocation; |
(...skipping 119 matching lines...)
4013 int size = FixedArray::SizeFor(length); | 4013 int size = FixedArray::SizeFor(length); |
4014 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure); | 4014 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure); |
4015 | 4015 |
4016 return AllocateRaw(size, space, OLD_POINTER_SPACE); | 4016 return AllocateRaw(size, space, OLD_POINTER_SPACE); |
4017 } | 4017 } |
4018 | 4018 |
4019 | 4019 |
4020 AllocationResult Heap::AllocateFixedArrayWithFiller(int length, | 4020 AllocationResult Heap::AllocateFixedArrayWithFiller(int length, |
4021 PretenureFlag pretenure, | 4021 PretenureFlag pretenure, |
4022 Object* filler) { | 4022 Object* filler) { |
4023 ASSERT(length >= 0); | 4023 DCHECK(length >= 0); |
4024 ASSERT(empty_fixed_array()->IsFixedArray()); | 4024 DCHECK(empty_fixed_array()->IsFixedArray()); |
4025 if (length == 0) return empty_fixed_array(); | 4025 if (length == 0) return empty_fixed_array(); |
4026 | 4026 |
4027 ASSERT(!InNewSpace(filler)); | 4027 DCHECK(!InNewSpace(filler)); |
4028 HeapObject* result; | 4028 HeapObject* result; |
4029 { AllocationResult allocation = AllocateRawFixedArray(length, pretenure); | 4029 { AllocationResult allocation = AllocateRawFixedArray(length, pretenure); |
4030 if (!allocation.To(&result)) return allocation; | 4030 if (!allocation.To(&result)) return allocation; |
4031 } | 4031 } |
4032 | 4032 |
4033 result->set_map_no_write_barrier(fixed_array_map()); | 4033 result->set_map_no_write_barrier(fixed_array_map()); |
4034 FixedArray* array = FixedArray::cast(result); | 4034 FixedArray* array = FixedArray::cast(result); |
4035 array->set_length(length); | 4035 array->set_length(length); |
4036 MemsetPointer(array->data_start(), filler, length); | 4036 MemsetPointer(array->data_start(), filler, length); |
4037 return array; | 4037 return array; |
(...skipping 134 matching lines...)
4172 hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask; | 4172 hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask; |
4173 attempts++; | 4173 attempts++; |
4174 } while (hash == 0 && attempts < 30); | 4174 } while (hash == 0 && attempts < 30); |
4175 if (hash == 0) hash = 1; // never return 0 | 4175 if (hash == 0) hash = 1; // never return 0 |
4176 | 4176 |
4177 Symbol::cast(result)->set_hash_field( | 4177 Symbol::cast(result)->set_hash_field( |
4178 Name::kIsNotArrayIndexMask | (hash << Name::kHashShift)); | 4178 Name::kIsNotArrayIndexMask | (hash << Name::kHashShift)); |
4179 Symbol::cast(result)->set_name(undefined_value()); | 4179 Symbol::cast(result)->set_name(undefined_value()); |
4180 Symbol::cast(result)->set_flags(Smi::FromInt(0)); | 4180 Symbol::cast(result)->set_flags(Smi::FromInt(0)); |
4181 | 4181 |
4182 ASSERT(!Symbol::cast(result)->is_private()); | 4182 DCHECK(!Symbol::cast(result)->is_private()); |
4183 return result; | 4183 return result; |
4184 } | 4184 } |
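
The hash loop above is a bounded retry: draw random bits, mask them down to Name::kHashBitMask, and try again (up to 30 times) if the result is 0, since 0 is reserved to mean "hash not yet computed"; a final fallback pins 0 to 1. The same idiom with the standard library RNG, mask width assumed for illustration:

#include <cassert>
#include <cstdint>
#include <random>

int main() {
  const uint32_t kHashBitMask = (1u << 30) - 1;  // assumption: 30 hash bits
  std::mt19937 rng(12345);  // fixed seed keeps the sketch deterministic

  uint32_t hash = 0;
  int attempts = 0;
  do {
    hash = rng() & kHashBitMask;
    attempts++;
  } while (hash == 0 && attempts < 30);
  if (hash == 0) hash = 1;  // 0 is reserved for "hash not computed"

  assert(hash != 0);
  return 0;
}
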
4185 | 4185 |
4186 | 4186 |
4187 AllocationResult Heap::AllocateStruct(InstanceType type) { | 4187 AllocationResult Heap::AllocateStruct(InstanceType type) { |
4188 Map* map; | 4188 Map* map; |
4189 switch (type) { | 4189 switch (type) { |
4190 #define MAKE_CASE(NAME, Name, name) \ | 4190 #define MAKE_CASE(NAME, Name, name) \ |
4191 case NAME##_TYPE: map = name##_map(); break; | 4191 case NAME##_TYPE: map = name##_map(); break; |
4192 STRUCT_LIST(MAKE_CASE) | 4192 STRUCT_LIST(MAKE_CASE) |
(...skipping 16 matching lines...)
4209 bool Heap::IsHeapIterable() { | 4209 bool Heap::IsHeapIterable() { |
4210 // TODO(hpayer): This function is not correct. Allocation folding in old | 4210 // TODO(hpayer): This function is not correct. Allocation folding in old |
4211 // space breaks the iterability. | 4211 // space breaks the iterability. |
4212 return (old_pointer_space()->swept_precisely() && | 4212 return (old_pointer_space()->swept_precisely() && |
4213 old_data_space()->swept_precisely() && | 4213 old_data_space()->swept_precisely() && |
4214 new_space_top_after_last_gc_ == new_space()->top()); | 4214 new_space_top_after_last_gc_ == new_space()->top()); |
4215 } | 4215 } |
4216 | 4216 |
4217 | 4217 |
4218 void Heap::MakeHeapIterable() { | 4218 void Heap::MakeHeapIterable() { |
4219 ASSERT(AllowHeapAllocation::IsAllowed()); | 4219 DCHECK(AllowHeapAllocation::IsAllowed()); |
4220 if (!IsHeapIterable()) { | 4220 if (!IsHeapIterable()) { |
4221 CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable"); | 4221 CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable"); |
4222 } | 4222 } |
4223 if (mark_compact_collector()->sweeping_in_progress()) { | 4223 if (mark_compact_collector()->sweeping_in_progress()) { |
4224 mark_compact_collector()->EnsureSweepingCompleted(); | 4224 mark_compact_collector()->EnsureSweepingCompleted(); |
4225 } | 4225 } |
4226 ASSERT(IsHeapIterable()); | 4226 DCHECK(IsHeapIterable()); |
4227 } | 4227 } |
4228 | 4228 |
4229 | 4229 |
4230 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) { | 4230 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) { |
4231 incremental_marking()->Step(step_size, | 4231 incremental_marking()->Step(step_size, |
4232 IncrementalMarking::NO_GC_VIA_STACK_GUARD); | 4232 IncrementalMarking::NO_GC_VIA_STACK_GUARD); |
4233 | 4233 |
4234 if (incremental_marking()->IsComplete()) { | 4234 if (incremental_marking()->IsComplete()) { |
4235 bool uncommit = false; | 4235 bool uncommit = false; |
4236 if (gc_count_at_last_idle_gc_ == gc_count_) { | 4236 if (gc_count_at_last_idle_gc_ == gc_count_) { |
(...skipping 292 matching lines...)
4529 // If the store buffer becomes overfull we mark pages as being exempt from | 4529 // If the store buffer becomes overfull we mark pages as being exempt from |
4530 // the store buffer. These pages are scanned to find pointers that point | 4530 // the store buffer. These pages are scanned to find pointers that point |
4531 // to the new space. In that case we may hit newly promoted objects and | 4531 // to the new space. In that case we may hit newly promoted objects and |
4532 // fix the pointers before the promotion queue gets to them. Thus the 'if'. | 4532 // fix the pointers before the promotion queue gets to them. Thus the 'if'. |
4533 if (object->IsHeapObject()) { | 4533 if (object->IsHeapObject()) { |
4534 if (Heap::InFromSpace(object)) { | 4534 if (Heap::InFromSpace(object)) { |
4535 callback(reinterpret_cast<HeapObject**>(slot), | 4535 callback(reinterpret_cast<HeapObject**>(slot), |
4536 HeapObject::cast(object)); | 4536 HeapObject::cast(object)); |
4537 Object* new_object = *slot; | 4537 Object* new_object = *slot; |
4538 if (InNewSpace(new_object)) { | 4538 if (InNewSpace(new_object)) { |
4539 SLOW_ASSERT(Heap::InToSpace(new_object)); | 4539 SLOW_DCHECK(Heap::InToSpace(new_object)); |
4540 SLOW_ASSERT(new_object->IsHeapObject()); | 4540 SLOW_DCHECK(new_object->IsHeapObject()); |
4541 store_buffer_.EnterDirectlyIntoStoreBuffer( | 4541 store_buffer_.EnterDirectlyIntoStoreBuffer( |
4542 reinterpret_cast<Address>(slot)); | 4542 reinterpret_cast<Address>(slot)); |
4543 } | 4543 } |
4544 SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object)); | 4544 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object)); |
4545 } else if (record_slots && | 4545 } else if (record_slots && |
4546 MarkCompactCollector::IsOnEvacuationCandidate(object)) { | 4546 MarkCompactCollector::IsOnEvacuationCandidate(object)) { |
4547 mark_compact_collector()->RecordSlot(slot, slot, object); | 4547 mark_compact_collector()->RecordSlot(slot, slot, object); |
4548 } | 4548 } |
4549 } | 4549 } |
4550 slot_address += kPointerSize; | 4550 slot_address += kPointerSize; |
4551 } | 4551 } |
4552 } | 4552 } |
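
The net effect: every slot that, after the callback has run, still holds a new-space pointer gets its address recorded in the store buffer, so the next scavenge can treat it as an old-to-new root. A toy remembered set capturing just that decision:

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  int new_space[4] = {0, 0, 0, 0};  // pretend new-space objects
  int old_value = 7;                // pretend old-space object

  int* slots[2] = {&new_space[1], &old_value};  // two heap slots to scan
  std::vector<int**> store_buffer;              // remembered old-to-new slots

  for (int** slot = slots; slot != slots + 2; ++slot) {
    uintptr_t t = reinterpret_cast<uintptr_t>(*slot);
    bool in_new_space = t >= reinterpret_cast<uintptr_t>(new_space) &&
                        t < reinterpret_cast<uintptr_t>(new_space + 4);
    if (in_new_space) store_buffer.push_back(slot);  // record the slot
  }

  assert(store_buffer.size() == 1 && store_buffer[0] == &slots[0]);
  return 0;
}

The real store buffer records raw slot addresses and can overflow, at which point pages are marked exempt and rescanned, as the comment at the top of the loop describes.
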
4553 | 4553 |
4554 | 4554 |
(...skipping 25 matching lines...)
4580 Map* free_space_map = heap->free_space_map(); | 4580 Map* free_space_map = heap->free_space_map(); |
4581 for ( ; current < limit; current++) { | 4581 for ( ; current < limit; current++) { |
4582 Object* o = *current; | 4582 Object* o = *current; |
4583 Address current_address = reinterpret_cast<Address>(current); | 4583 Address current_address = reinterpret_cast<Address>(current); |
4584 // Skip free space. | 4584 // Skip free space. |
4585 if (o == free_space_map) { | 4585 if (o == free_space_map) { |
4586 Address current_address = reinterpret_cast<Address>(current); | 4586 Address current_address = reinterpret_cast<Address>(current); |
4587 FreeSpace* free_space = | 4587 FreeSpace* free_space = |
4588 FreeSpace::cast(HeapObject::FromAddress(current_address)); | 4588 FreeSpace::cast(HeapObject::FromAddress(current_address)); |
4589 int skip = free_space->Size(); | 4589 int skip = free_space->Size(); |
4590 ASSERT(current_address + skip <= reinterpret_cast<Address>(limit)); | 4590 DCHECK(current_address + skip <= reinterpret_cast<Address>(limit)); |
4591 ASSERT(skip > 0); | 4591 DCHECK(skip > 0); |
4592 current_address += skip - kPointerSize; | 4592 current_address += skip - kPointerSize; |
4593 current = reinterpret_cast<Object**>(current_address); | 4593 current = reinterpret_cast<Object**>(current_address); |
4594 continue; | 4594 continue; |
4595 } | 4595 } |
4596 // Skip the current linear allocation space between top and limit, which is | 4596 // Skip the current linear allocation space between top and limit, which is |
4597 // unmarked with the free space map, but can contain junk. | 4597 // unmarked with the free space map, but can contain junk. |
4598 if (current_address == special_garbage_start && | 4598 if (current_address == special_garbage_start && |
4599 special_garbage_end != special_garbage_start) { | 4599 special_garbage_end != special_garbage_start) { |
4600 current_address = special_garbage_end - kPointerSize; | 4600 current_address = special_garbage_end - kPointerSize; |
4601 current = reinterpret_cast<Object**>(current_address); | 4601 current = reinterpret_cast<Object**>(current_address); |
4602 continue; | 4602 continue; |
4603 } | 4603 } |
4604 if (!(*filter)(current)) continue; | 4604 if (!(*filter)(current)) continue; |
4605 ASSERT(current_address < special_garbage_start || | 4605 DCHECK(current_address < special_garbage_start || |
4606 current_address >= special_garbage_end); | 4606 current_address >= special_garbage_end); |
4607 ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue); | 4607 DCHECK(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue); |
4608 // We have to check that the pointer does not point into new space | 4608 // We have to check that the pointer does not point into new space |
4609 // without trying to cast it to a heap object since the hash field of | 4609 // without trying to cast it to a heap object since the hash field of |
4610 // a string can contain values like 1 and 3 which are tagged null | 4610 // a string can contain values like 1 and 3 which are tagged null |
4611 // pointers. | 4611 // pointers. |
4612 if (!heap->InNewSpace(o)) continue; | 4612 if (!heap->InNewSpace(o)) continue; |
4613 while (**store_buffer_position < current && | 4613 while (**store_buffer_position < current && |
4614 *store_buffer_position < store_buffer_top) { | 4614 *store_buffer_position < store_buffer_top) { |
4615 (*store_buffer_position)++; | 4615 (*store_buffer_position)++; |
4616 } | 4616 } |
4617 if (**store_buffer_position != current || | 4617 if (**store_buffer_position != current || |
(...skipping 275 matching lines...)
4893 | 4893 |
4894 initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_); | 4894 initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_); |
4895 | 4895 |
4896 // The old generation is paged and needs at least one page for each space. | 4896 // The old generation is paged and needs at least one page for each space. |
4897 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; | 4897 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; |
4898 max_old_generation_size_ = | 4898 max_old_generation_size_ = |
4899 Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize), | 4899 Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize), |
4900 max_old_generation_size_); | 4900 max_old_generation_size_); |
4901 | 4901 |
4902 // We rely on being able to allocate new arrays in paged spaces. | 4902 // We rely on being able to allocate new arrays in paged spaces. |
4903 ASSERT(Page::kMaxRegularHeapObjectSize >= | 4903 DCHECK(Page::kMaxRegularHeapObjectSize >= |
4904 (JSArray::kSize + | 4904 (JSArray::kSize + |
4905 FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) + | 4905 FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) + |
4906 AllocationMemento::kSize)); | 4906 AllocationMemento::kSize)); |
4907 | 4907 |
4908 code_range_size_ = code_range_size * MB; | 4908 code_range_size_ = code_range_size * MB; |
4909 | 4909 |
4910 configured_ = true; | 4910 configured_ = true; |
4911 return true; | 4911 return true; |
4912 } | 4912 } |
4913 | 4913 |
(...skipping 27 matching lines...)
4941 isolate()->memory_allocator()->Size() + | 4941 isolate()->memory_allocator()->Size() + |
4942 isolate()->memory_allocator()->Available(); | 4942 isolate()->memory_allocator()->Available(); |
4943 *stats->os_error = base::OS::GetLastError(); | 4943 *stats->os_error = base::OS::GetLastError(); |
4944 isolate()->memory_allocator()->Available(); | 4944 isolate()->memory_allocator()->Available(); |
4945 if (take_snapshot) { | 4945 if (take_snapshot) { |
4946 HeapIterator iterator(this); | 4946 HeapIterator iterator(this); |
4947 for (HeapObject* obj = iterator.next(); | 4947 for (HeapObject* obj = iterator.next(); |
4948 obj != NULL; | 4948 obj != NULL; |
4949 obj = iterator.next()) { | 4949 obj = iterator.next()) { |
4950 InstanceType type = obj->map()->instance_type(); | 4950 InstanceType type = obj->map()->instance_type(); |
4951 ASSERT(0 <= type && type <= LAST_TYPE); | 4951 DCHECK(0 <= type && type <= LAST_TYPE); |
4952 stats->objects_per_type[type]++; | 4952 stats->objects_per_type[type]++; |
4953 stats->size_per_type[type] += obj->Size(); | 4953 stats->size_per_type[type] += obj->Size(); |
4954 } | 4954 } |
4955 } | 4955 } |
4956 } | 4956 } |
4957 | 4957 |
4958 | 4958 |
4959 intptr_t Heap::PromotedSpaceSizeOfObjects() { | 4959 intptr_t Heap::PromotedSpaceSizeOfObjects() { |
4960 return old_pointer_space_->SizeOfObjects() | 4960 return old_pointer_space_->SizeOfObjects() |
4961 + old_data_space_->SizeOfObjects() | 4961 + old_data_space_->SizeOfObjects() |
(...skipping 164 matching lines...)
5126 if (!property_cell_space_->SetUp()) return false; | 5126 if (!property_cell_space_->SetUp()) return false; |
5127 | 5127 |
5128 // The large object code space may contain code or data. We set the memory | 5128 // The large object code space may contain code or data. We set the memory |
5129 // to be non-executable here for safety, but this means we need to enable it | 5129 // to be non-executable here for safety, but this means we need to enable it |
5130 // explicitly when allocating large code objects. | 5130 // explicitly when allocating large code objects. |
5131 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE); | 5131 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE); |
5132 if (lo_space_ == NULL) return false; | 5132 if (lo_space_ == NULL) return false; |
5133 if (!lo_space_->SetUp()) return false; | 5133 if (!lo_space_->SetUp()) return false; |
5134 | 5134 |
5135 // Set up the seed that is used to randomize the string hash function. | 5135 // Set up the seed that is used to randomize the string hash function. |
5136 ASSERT(hash_seed() == 0); | 5136 DCHECK(hash_seed() == 0); |
5137 if (FLAG_randomize_hashes) { | 5137 if (FLAG_randomize_hashes) { |
5138 if (FLAG_hash_seed == 0) { | 5138 if (FLAG_hash_seed == 0) { |
5139 int rnd = isolate()->random_number_generator()->NextInt(); | 5139 int rnd = isolate()->random_number_generator()->NextInt(); |
5140 set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask)); | 5140 set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask)); |
5141 } else { | 5141 } else { |
5142 set_hash_seed(Smi::FromInt(FLAG_hash_seed)); | 5142 set_hash_seed(Smi::FromInt(FLAG_hash_seed)); |
5143 } | 5143 } |
5144 } | 5144 } |
5145 | 5145 |
5146 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity())); | 5146 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity())); |
(...skipping 18 matching lines...)
5165 | 5165 |
5166 set_native_contexts_list(undefined_value()); | 5166 set_native_contexts_list(undefined_value()); |
5167 set_array_buffers_list(undefined_value()); | 5167 set_array_buffers_list(undefined_value()); |
5168 set_allocation_sites_list(undefined_value()); | 5168 set_allocation_sites_list(undefined_value()); |
5169 weak_object_to_code_table_ = undefined_value(); | 5169 weak_object_to_code_table_ = undefined_value(); |
5170 return true; | 5170 return true; |
5171 } | 5171 } |
5172 | 5172 |
5173 | 5173 |
5174 void Heap::SetStackLimits() { | 5174 void Heap::SetStackLimits() { |
5175 ASSERT(isolate_ != NULL); | 5175 DCHECK(isolate_ != NULL); |
5176 ASSERT(isolate_ == isolate()); | 5176 DCHECK(isolate_ == isolate()); |
5177 // On 64-bit machines, pointers are generally out of range of Smis. We write | 5177 // On 64-bit machines, pointers are generally out of range of Smis. We write |
5178 // something that looks like an out of range Smi to the GC. | 5178 // something that looks like an out of range Smi to the GC. |
5179 | 5179 |
5180 // Set up the special root array entries containing the stack limits. | 5180 // Set up the special root array entries containing the stack limits. |
5181 // These are actually addresses, but the tag makes the GC ignore it. | 5181 // These are actually addresses, but the tag makes the GC ignore it. |
5182 roots_[kStackLimitRootIndex] = | 5182 roots_[kStackLimitRootIndex] = |
5183 reinterpret_cast<Object*>( | 5183 reinterpret_cast<Object*>( |
5184 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag); | 5184 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag); |
5185 roots_[kRealStackLimitRootIndex] = | 5185 roots_[kRealStackLimitRootIndex] = |
5186 reinterpret_cast<Object*>( | 5186 reinterpret_cast<Object*>( |
(...skipping 108 matching lines...)
5295 store_buffer()->TearDown(); | 5295 store_buffer()->TearDown(); |
5296 incremental_marking()->TearDown(); | 5296 incremental_marking()->TearDown(); |
5297 | 5297 |
5298 isolate_->memory_allocator()->TearDown(); | 5298 isolate_->memory_allocator()->TearDown(); |
5299 } | 5299 } |
5300 | 5300 |
5301 | 5301 |
5302 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback, | 5302 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback, |
5303 GCType gc_type, | 5303 GCType gc_type, |
5304 bool pass_isolate) { | 5304 bool pass_isolate) { |
5305 ASSERT(callback != NULL); | 5305 DCHECK(callback != NULL); |
5306 GCPrologueCallbackPair pair(callback, gc_type, pass_isolate); | 5306 GCPrologueCallbackPair pair(callback, gc_type, pass_isolate); |
5307 ASSERT(!gc_prologue_callbacks_.Contains(pair)); | 5307 DCHECK(!gc_prologue_callbacks_.Contains(pair)); |
5308 return gc_prologue_callbacks_.Add(pair); | 5308 return gc_prologue_callbacks_.Add(pair); |
5309 } | 5309 } |
5310 | 5310 |
5311 | 5311 |
5312 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) { | 5312 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) { |
5313 ASSERT(callback != NULL); | 5313 DCHECK(callback != NULL); |
5314 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { | 5314 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { |
5315 if (gc_prologue_callbacks_[i].callback == callback) { | 5315 if (gc_prologue_callbacks_[i].callback == callback) { |
5316 gc_prologue_callbacks_.Remove(i); | 5316 gc_prologue_callbacks_.Remove(i); |
5317 return; | 5317 return; |
5318 } | 5318 } |
5319 } | 5319 } |
5320 UNREACHABLE(); | 5320 UNREACHABLE(); |
5321 } | 5321 } |
5322 | 5322 |
5323 | 5323 |
5324 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback, | 5324 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback, |
5325 GCType gc_type, | 5325 GCType gc_type, |
5326 bool pass_isolate) { | 5326 bool pass_isolate) { |
5327 ASSERT(callback != NULL); | 5327 DCHECK(callback != NULL); |
5328 GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate); | 5328 GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate); |
5329 ASSERT(!gc_epilogue_callbacks_.Contains(pair)); | 5329 DCHECK(!gc_epilogue_callbacks_.Contains(pair)); |
5330 return gc_epilogue_callbacks_.Add(pair); | 5330 return gc_epilogue_callbacks_.Add(pair); |
5331 } | 5331 } |
5332 | 5332 |
5333 | 5333 |
5334 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) { | 5334 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) { |
5335 ASSERT(callback != NULL); | 5335 DCHECK(callback != NULL); |
5336 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) { | 5336 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) { |
5337 if (gc_epilogue_callbacks_[i].callback == callback) { | 5337 if (gc_epilogue_callbacks_[i].callback == callback) { |
5338 gc_epilogue_callbacks_.Remove(i); | 5338 gc_epilogue_callbacks_.Remove(i); |
5339 return; | 5339 return; |
5340 } | 5340 } |
5341 } | 5341 } |
5342 UNREACHABLE(); | 5342 UNREACHABLE(); |
5343 } | 5343 } |
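
These four functions back the public v8::Isolate callback API. A hedged embedder-side sketch, assuming the v8.h signatures of this era (GCPrologueCallback taking isolate, type, and flags); the function names here are hypothetical:

#include <v8.h>

// Runs before every GC that matches the filter given at registration.
static void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
                         v8::GCCallbackFlags flags) {
  // e.g. snapshot embedder-side counters before the collector runs
}

void RegisterGCHooks(v8::Isolate* isolate) {
  // Only full mark-sweep/compact cycles; use v8::kGCTypeAll for everything.
  isolate->AddGCPrologueCallback(OnGCPrologue, v8::kGCTypeMarkSweepCompact);
  // ... later, deregistration must use the same function pointer:
  isolate->RemoveGCPrologueCallback(OnGCPrologue);
}

Note that Heap::AddGCPrologueCallback above DCHECKs against duplicate registration, so adding the same callback/filter pair twice is a bug in debug builds.
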
5344 | 5344 |
5345 | 5345 |
5346 // TODO(ishell): Find a better place for this. | 5346 // TODO(ishell): Find a better place for this. |
5347 void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj, | 5347 void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj, |
5348 Handle<DependentCode> dep) { | 5348 Handle<DependentCode> dep) { |
5349 ASSERT(!InNewSpace(*obj)); | 5349 DCHECK(!InNewSpace(*obj)); |
5350 ASSERT(!InNewSpace(*dep)); | 5350 DCHECK(!InNewSpace(*dep)); |
5351 // This handle scope keeps the table handle local to this function, which | 5351 // This handle scope keeps the table handle local to this function, which |
5352 // allows us to safely skip write barriers in table update operations. | 5352 // allows us to safely skip write barriers in table update operations. |
5353 HandleScope scope(isolate()); | 5353 HandleScope scope(isolate()); |
5354 Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_), | 5354 Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_), |
5355 isolate()); | 5355 isolate()); |
5356 table = WeakHashTable::Put(table, obj, dep); | 5356 table = WeakHashTable::Put(table, obj, dep); |
5357 | 5357 |
5358 if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) { | 5358 if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) { |
5359 WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value()); | 5359 WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value()); |
5360 } | 5360 } |
5361 set_weak_object_to_code_table(*table); | 5361 set_weak_object_to_code_table(*table); |
5362 ASSERT_EQ(*dep, table->Lookup(obj)); | 5362 DCHECK_EQ(*dep, table->Lookup(obj)); |
5363 } | 5363 } |
5364 | 5364 |
5365 | 5365 |
5366 DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) { | 5366 DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) { |
5367 Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj); | 5367 Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj); |
5368 if (dep->IsDependentCode()) return DependentCode::cast(dep); | 5368 if (dep->IsDependentCode()) return DependentCode::cast(dep); |
5369 return DependentCode::cast(empty_fixed_array()); | 5369 return DependentCode::cast(empty_fixed_array()); |
5370 } | 5370 } |
5371 | 5371 |
5372 | 5372 |
(...skipping 129 matching lines...)
5502 } | 5502 } |
5503 } | 5503 } |
5504 | 5504 |
5505 // Return iterator for the new current space. | 5505 // Return iterator for the new current space. |
5506 return CreateIterator(); | 5506 return CreateIterator(); |
5507 } | 5507 } |
5508 | 5508 |
5509 | 5509 |
5510 // Create an iterator for the space to iterate. | 5510 // Create an iterator for the space to iterate. |
5511 ObjectIterator* SpaceIterator::CreateIterator() { | 5511 ObjectIterator* SpaceIterator::CreateIterator() { |
5512 ASSERT(iterator_ == NULL); | 5512 DCHECK(iterator_ == NULL); |
5513 | 5513 |
5514 switch (current_space_) { | 5514 switch (current_space_) { |
5515 case NEW_SPACE: | 5515 case NEW_SPACE: |
5516 iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_); | 5516 iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_); |
5517 break; | 5517 break; |
5518 case OLD_POINTER_SPACE: | 5518 case OLD_POINTER_SPACE: |
5519 iterator_ = | 5519 iterator_ = |
5520 new HeapObjectIterator(heap_->old_pointer_space(), size_func_); | 5520 new HeapObjectIterator(heap_->old_pointer_space(), size_func_); |
5521 break; | 5521 break; |
5522 case OLD_DATA_SPACE: | 5522 case OLD_DATA_SPACE: |
(...skipping 11 matching lines...)
5534 case PROPERTY_CELL_SPACE: | 5534 case PROPERTY_CELL_SPACE: |
5535 iterator_ = new HeapObjectIterator(heap_->property_cell_space(), | 5535 iterator_ = new HeapObjectIterator(heap_->property_cell_space(), |
5536 size_func_); | 5536 size_func_); |
5537 break; | 5537 break; |
5538 case LO_SPACE: | 5538 case LO_SPACE: |
5539 iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_); | 5539 iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_); |
5540 break; | 5540 break; |
5541 } | 5541 } |
5542 | 5542 |
5543 // Return the newly allocated iterator. | 5543 // Return the newly allocated iterator. |
5544 ASSERT(iterator_ != NULL); | 5544 DCHECK(iterator_ != NULL); |
5545 return iterator_; | 5545 return iterator_; |
5546 } | 5546 } |
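
CreateIterator is a lazy factory: the concrete iterator type is chosen by the space enum, built on first use, and cached in iterator_. The same dispatch shape in miniature, with toy iterator types:

#include <cassert>
#include <memory>

enum Space { kNewSpace, kOldSpace };  // toy space enum

struct ObjectIterator {
  virtual ~ObjectIterator() {}
  virtual Space space() const = 0;
};
struct NewSpaceIterator : ObjectIterator {
  virtual Space space() const { return kNewSpace; }
};
struct OldSpaceIterator : ObjectIterator {
  virtual Space space() const { return kOldSpace; }
};

// One concrete iterator type per space, selected by a switch.
std::unique_ptr<ObjectIterator> CreateIterator(Space space) {
  switch (space) {
    case kNewSpace:
      return std::unique_ptr<ObjectIterator>(new NewSpaceIterator());
    case kOldSpace:
      return std::unique_ptr<ObjectIterator>(new OldSpaceIterator());
  }
  return std::unique_ptr<ObjectIterator>();  // unreachable
}

int main() {
  assert(CreateIterator(kNewSpace)->space() == kNewSpace);
  return 0;
}
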
5547 | 5547 |
5548 | 5548 |
5549 class HeapObjectsFilter { | 5549 class HeapObjectsFilter { |
5550 public: | 5550 public: |
5551 virtual ~HeapObjectsFilter() {} | 5551 virtual ~HeapObjectsFilter() {} |
5552 virtual bool SkipObject(HeapObject* object) = 0; | 5552 virtual bool SkipObject(HeapObject* object) = 0; |
5553 }; | 5553 }; |
5554 | 5554 |
(...skipping 90 matching lines...)
5645 } | 5645 } |
5646 object_iterator_ = space_iterator_->next(); | 5646 object_iterator_ = space_iterator_->next(); |
5647 } | 5647 } |
5648 | 5648 |
5649 | 5649 |
5650 void HeapIterator::Shutdown() { | 5650 void HeapIterator::Shutdown() { |
5651 #ifdef DEBUG | 5651 #ifdef DEBUG |
5652 // Assert that in filtering mode we have iterated through all | 5652 // Assert that in filtering mode we have iterated through all |
5653 // objects. Otherwise, the heap will be left in an inconsistent state. | 5653 // objects. Otherwise, the heap will be left in an inconsistent state. |
5654 if (filtering_ != kNoFiltering) { | 5654 if (filtering_ != kNoFiltering) { |
5655 ASSERT(object_iterator_ == NULL); | 5655 DCHECK(object_iterator_ == NULL); |
5656 } | 5656 } |
5657 #endif | 5657 #endif |
5658 // Make sure the last iterator is deallocated. | 5658 // Make sure the last iterator is deallocated. |
5659 delete space_iterator_; | 5659 delete space_iterator_; |
5660 space_iterator_ = NULL; | 5660 space_iterator_ = NULL; |
5661 object_iterator_ = NULL; | 5661 object_iterator_ = NULL; |
5662 delete filter_; | 5662 delete filter_; |
5663 filter_ = NULL; | 5663 filter_ = NULL; |
5664 } | 5664 } |
5665 | 5665 |
(...skipping 84 matching lines...)
5750 } | 5750 } |
5751 | 5751 |
5752 | 5752 |
5753 void PathTracer::Reset() { | 5753 void PathTracer::Reset() { |
5754 found_target_ = false; | 5754 found_target_ = false; |
5755 object_stack_.Clear(); | 5755 object_stack_.Clear(); |
5756 } | 5756 } |
5757 | 5757 |
5758 | 5758 |
5759 void PathTracer::TracePathFrom(Object** root) { | 5759 void PathTracer::TracePathFrom(Object** root) { |
5760 ASSERT((search_target_ == kAnyGlobalObject) || | 5760 DCHECK((search_target_ == kAnyGlobalObject) || |
5761 search_target_->IsHeapObject()); | 5761 search_target_->IsHeapObject()); |
5762 found_target_in_trace_ = false; | 5762 found_target_in_trace_ = false; |
5763 Reset(); | 5763 Reset(); |
5764 | 5764 |
5765 MarkVisitor mark_visitor(this); | 5765 MarkVisitor mark_visitor(this); |
5766 MarkRecursively(root, &mark_visitor); | 5766 MarkRecursively(root, &mark_visitor); |
5767 | 5767 |
5768 UnmarkVisitor unmark_visitor(this); | 5768 UnmarkVisitor unmark_visitor(this); |
5769 UnmarkRecursively(root, &unmark_visitor); | 5769 UnmarkRecursively(root, &unmark_visitor); |
5770 | 5770 |
(...skipping 74 matching lines...)
5845 } | 5845 } |
5846 | 5846 |
5847 | 5847 |
5848 void PathTracer::ProcessResults() { | 5848 void PathTracer::ProcessResults() { |
5849 if (found_target_) { | 5849 if (found_target_) { |
5850 OFStream os(stdout); | 5850 OFStream os(stdout); |
5851 os << "=====================================\n" | 5851 os << "=====================================\n" |
5852 << "==== Path to object ====\n" | 5852 << "==== Path to object ====\n" |
5853 << "=====================================\n\n"; | 5853 << "=====================================\n\n"; |
5854 | 5854 |
5855 ASSERT(!object_stack_.is_empty()); | 5855 DCHECK(!object_stack_.is_empty()); |
5856 for (int i = 0; i < object_stack_.length(); i++) { | 5856 for (int i = 0; i < object_stack_.length(); i++) { |
5857 if (i > 0) os << "\n |\n |\n V\n\n"; | 5857 if (i > 0) os << "\n |\n |\n V\n\n"; |
5858 object_stack_[i]->Print(os); | 5858 object_stack_[i]->Print(os); |
5859 } | 5859 } |
5860 os << "=====================================\n"; | 5860 os << "=====================================\n"; |
5861 } | 5861 } |
5862 } | 5862 } |
5863 | 5863 |
5864 | 5864 |
5865 // Triggers a depth-first traversal of reachable objects from one | 5865 // Triggers a depth-first traversal of reachable objects from one |
(...skipping 69 matching lines...)
5935 DisallowHeapAllocation no_gc; | 5935 DisallowHeapAllocation no_gc; |
5936 if (!name->IsUniqueName()) { | 5936 if (!name->IsUniqueName()) { |
5937 if (!StringTable::InternalizeStringIfExists(name->GetIsolate(), | 5937 if (!StringTable::InternalizeStringIfExists(name->GetIsolate(), |
5938 Handle<String>::cast(name)). | 5938 Handle<String>::cast(name)). |
5939 ToHandle(&name)) { | 5939 ToHandle(&name)) { |
5940 return; | 5940 return; |
5941 } | 5941 } |
5942 } | 5942 } |
5943 // This cache is cleared only between mark compact passes, so we expect the | 5943 // This cache is cleared only between mark compact passes, so we expect the |
5944 // cache to only contain old space names. | 5944 // cache to only contain old space names. |
5945 ASSERT(!map->GetIsolate()->heap()->InNewSpace(*name)); | 5945 DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name)); |
5946 | 5946 |
5947 int index = (Hash(map, name) & kHashMask); | 5947 int index = (Hash(map, name) & kHashMask); |
5948 // After a GC there will be free slots, so we use them in order (this may | 5948 // After a GC there will be free slots, so we use them in order (this may |
5949 // help to get the most frequently used one in position 0). | 5949 // help to get the most frequently used one in position 0). |
5950 for (int i = 0; i < kEntriesPerBucket; i++) { | 5950 for (int i = 0; i < kEntriesPerBucket; i++) { |
5951 Key& key = keys_[index]; | 5951 Key& key = keys_[index]; |
5952 Object* free_entry_indicator = NULL; | 5952 Object* free_entry_indicator = NULL; |
5953 if (key.map == free_entry_indicator) { | 5953 if (key.map == free_entry_indicator) { |
5954 key.map = *map; | 5954 key.map = *map; |
5955 key.name = *name; | 5955 key.name = *name; |
(...skipping 27 matching lines...)
5983 for (int index = 0; index < kLength; index++) keys_[index].source = NULL; | 5983 for (int index = 0; index < kLength; index++) keys_[index].source = NULL; |
5984 } | 5984 } |
5985 | 5985 |
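// A minimal standalone sketch of the bucket-probing update used by
// KeyedLookupCache::Update() above (all names and sizes here are
// hypothetical, not the V8 API): the hash picks a bucket-aligned index,
// the first free slot in the bucket is taken (free slots appear after a
// GC), and a full bucket is shifted down so the newest entry lands in
// position 0.
#include <cstddef>
#include <cstdint>

namespace sketch {

struct Entry { const void* map; const void* name; int field_offset; };

const int kEntriesPerBucket = 2;
const int kLength = 64;  // power of two
const int kHashMask = (kLength - 1) & ~(kEntriesPerBucket - 1);

Entry keys[kLength];

int Hash(const void* map, const void* name) {
  // Mix the two pointers; the mask keeps the index bucket-aligned.
  uintptr_t h = reinterpret_cast<uintptr_t>(map) ^
                reinterpret_cast<uintptr_t>(name);
  return static_cast<int>(h) & kHashMask;
}

void Update(const void* map, const void* name, int field_offset) {
  int index = Hash(map, name);
  // Prefer a free slot inside the bucket.
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Entry& e = keys[index + i];
    if (e.map == NULL) {
      e.map = map;
      e.name = name;
      e.field_offset = field_offset;
      return;
    }
  }
  // Bucket full: shift entries down one slot and insert at the front.
  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
    keys[index + i] = keys[index + i - 1];
  }
  keys[index].map = map;
  keys[index].name = name;
  keys[index].field_offset = field_offset;
}

}  // namespace sketch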
5986 | 5986 |
5987 void ExternalStringTable::CleanUp() { | 5987 void ExternalStringTable::CleanUp() { |
5988 int last = 0; | 5988 int last = 0; |
5989 for (int i = 0; i < new_space_strings_.length(); ++i) { | 5989 for (int i = 0; i < new_space_strings_.length(); ++i) { |
5990 if (new_space_strings_[i] == heap_->the_hole_value()) { | 5990 if (new_space_strings_[i] == heap_->the_hole_value()) { |
5991 continue; | 5991 continue; |
5992 } | 5992 } |
5993 ASSERT(new_space_strings_[i]->IsExternalString()); | 5993 DCHECK(new_space_strings_[i]->IsExternalString()); |
5994 if (heap_->InNewSpace(new_space_strings_[i])) { | 5994 if (heap_->InNewSpace(new_space_strings_[i])) { |
5995 new_space_strings_[last++] = new_space_strings_[i]; | 5995 new_space_strings_[last++] = new_space_strings_[i]; |
5996 } else { | 5996 } else { |
5997 old_space_strings_.Add(new_space_strings_[i]); | 5997 old_space_strings_.Add(new_space_strings_[i]); |
5998 } | 5998 } |
5999 } | 5999 } |
6000 new_space_strings_.Rewind(last); | 6000 new_space_strings_.Rewind(last); |
6001 new_space_strings_.Trim(); | 6001 new_space_strings_.Trim(); |
6002 | 6002 |
6003 last = 0; | 6003 last = 0; |
6004 for (int i = 0; i < old_space_strings_.length(); ++i) { | 6004 for (int i = 0; i < old_space_strings_.length(); ++i) { |
6005 if (old_space_strings_[i] == heap_->the_hole_value()) { | 6005 if (old_space_strings_[i] == heap_->the_hole_value()) { |
6006 continue; | 6006 continue; |
6007 } | 6007 } |
6008 ASSERT(old_space_strings_[i]->IsExternalString()); | 6008 DCHECK(old_space_strings_[i]->IsExternalString()); |
6009 ASSERT(!heap_->InNewSpace(old_space_strings_[i])); | 6009 DCHECK(!heap_->InNewSpace(old_space_strings_[i])); |
6010 old_space_strings_[last++] = old_space_strings_[i]; | 6010 old_space_strings_[last++] = old_space_strings_[i]; |
6011 } | 6011 } |
6012 old_space_strings_.Rewind(last); | 6012 old_space_strings_.Rewind(last); |
6013 old_space_strings_.Trim(); | 6013 old_space_strings_.Trim(); |
6014 #ifdef VERIFY_HEAP | 6014 #ifdef VERIFY_HEAP |
6015 if (FLAG_verify_heap) { | 6015 if (FLAG_verify_heap) { |
6016 Verify(); | 6016 Verify(); |
6017 } | 6017 } |
6018 #endif | 6018 #endif |
6019 } | 6019 } |
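// The same compact-in-place pattern as CleanUp() above, in a standalone
// sketch (std::vector stands in for V8's List, and the predicates are
// hypothetical): survivors are packed toward the front, entries that
// migrated out of new space are appended to the old list, and the stale
// tail is dropped, mirroring the Rewind() + Trim() calls above.
#include <vector>

template <typename T, typename IsHole, typename IsYoung>
void CleanUpSketch(std::vector<T>* young, std::vector<T>* old,
                   IsHole is_hole, IsYoung is_young) {
  size_t last = 0;
  for (size_t i = 0; i < young->size(); ++i) {
    if (is_hole((*young)[i])) continue;   // string died; drop the slot
    if (is_young((*young)[i])) {
      (*young)[last++] = (*young)[i];     // still in new space: keep
    } else {
      old->push_back((*young)[i]);        // promoted: move to the old list
    }
  }
  young->resize(last);                    // Rewind() + Trim()
}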
(...skipping 146 matching lines...)
6166 static_cast<int>(object_sizes_last_time_[index])); | 6166 static_cast<int>(object_sizes_last_time_[index])); |
6167 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) | 6167 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) |
6168 #undef ADJUST_LAST_TIME_OBJECT_COUNT | 6168 #undef ADJUST_LAST_TIME_OBJECT_COUNT |
6169 | 6169 |
6170 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); | 6170 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); |
6171 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); | 6171 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); |
6172 ClearObjectStats(); | 6172 ClearObjectStats(); |
6173 } | 6173 } |
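// A minimal sketch of the checkpoint pattern above (hypothetical names and
// array size): the live counters are copied wholesale into the
// *_last_time_ arrays so the next GC can report deltas, then the live
// counters are reset for the next collection cycle.
#include <cstddef>
#include <cstring>

const int kCount = 16;  // stand-in for the real stats table length
size_t object_counts[kCount];
size_t object_counts_last_time[kCount];

void CheckpointSketch() {
  std::memcpy(object_counts_last_time, object_counts,
              sizeof(object_counts));
  std::memset(object_counts, 0, sizeof(object_counts));  // ClearObjectStats()
}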
6174 | 6174 |
6175 } } // namespace v8::internal | 6175 } } // namespace v8::internal |