Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 8477030: Ensure that promotion queue does not overlap with objects relocated to ToSpace. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: remove wrong assertion (created 9 years, 1 month ago)
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 125 matching lines...)
       max_alive_after_gc_(0),
       min_in_mutator_(kMaxInt),
       alive_after_last_gc_(0),
       last_gc_end_timestamp_(0.0),
       store_buffer_(this),
       marking_(this),
       incremental_marking_(this),
       number_idle_notifications_(0),
       last_idle_notification_gc_count_(0),
       last_idle_notification_gc_count_init_(false),
+      promotion_queue_(this),
       configured_(false),
       chunks_queued_for_free_(NULL) {
   // Allow build-time customization of the max semispace size. Building
   // V8 with snapshots and a non-default max semispace size is much
   // easier if you can define it as part of the build environment.
 #if defined(V8_MAX_SEMISPACE_SIZE)
   max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
 #endif
 
   intptr_t max_virtual = OS::MaxVirtualMemory();
(...skipping 825 matching lines...)
         current_page_->set_scan_on_scavenge(true);
         ASSERT(start_of_current_page_ != store_buffer_->Top());
         store_buffer_->SetTop(start_of_current_page_);
       }
     } else {
       UNREACHABLE();
     }
   }
 
 
+void PromotionQueue::Initialize() {
+  // Assumes that a NewSpacePage exactly fits a number of promotion queue
+  // entries (where each is a pair of intptr_t). This allows us to simplify
+  // the test for when to switch pages.
+  ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
+         == 0);
+  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
+  front_ = rear_ =
+      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
+  emergency_stack_ = NULL;
+  guard_ = false;
+}
+
+
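The ASSERT above is what lets the queue switch pages with a plain pointer comparison: a page body always holds a whole number of two-word entries, so rear_ can only cross a page boundary exactly at an entry boundary. Below is a minimal standalone sketch, not V8 code, of the queue shape Initialize() sets up: (size, object) word pairs growing downward from the high end of a buffer (ToSpaceEnd) toward limit_ (ToSpaceStart). All names and constants in the sketch are hypothetical.

// Standalone sketch of a downward-growing pair queue (not V8 code).
#include <cassert>
#include <cstdint>

class DownwardPairQueue {
 public:
  void Initialize(intptr_t* low, intptr_t* high) {
    limit_ = low;
    front_ = rear_ = high;  // empty queue sits at the high end
  }
  bool is_empty() const { return front_ == rear_; }
  void insert(intptr_t object, intptr_t size) {
    assert(rear_ - 2 >= limit_);  // V8 relocates the head instead of dying
    *(--rear_) = object;          // object lands at the higher word,
    *(--rear_) = size;            // size at the lower word of the pair
  }
  void remove(intptr_t* object, intptr_t* size) {
    assert(!is_empty());
    *object = *(--front_);  // consume the oldest pair, moving downward
    *size = *(--front_);
  }
 private:
  intptr_t* limit_;
  intptr_t* front_;
  intptr_t* rear_;
};

int main() {
  intptr_t buffer[8];
  DownwardPairQueue q;
  q.Initialize(buffer, buffer + 8);
  q.insert(0x1000, 16);
  q.insert(0x2000, 24);
  intptr_t object, size;
  q.remove(&object, &size);  // FIFO: the oldest entry comes out first
  assert(object == 0x1000 && size == 16);
  return 0;
}

Because pairs are written downward (object first, then size), reading them back in ascending address order yields the size word first, which is exactly the order RelocateQueueHead() below relies on.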
+void PromotionQueue::RelocateQueueHead() {
+  ASSERT(emergency_stack_ == NULL);
+
+  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+  intptr_t* head_start = rear_;
+  intptr_t* head_end =
+      Min(front_, reinterpret_cast<intptr_t*>(p->body_limit()));
+
+  int entries_count = (head_end - head_start) / kEntrySizeInWords;
+
+  emergency_stack_ = new List<Entry>(2 * entries_count);
+
+  while (head_start != head_end) {
+    int size = *(head_start++);
+    HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
+    emergency_stack_->Add(Entry(obj, size));
+  }
+  rear_ = head_end;
+}
+
+
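RelocateQueueHead() handles the case this CL targets: the scavenger is about to allocate into the page that currently holds the queue's head, so the entries between rear_ and that page's body limit (capped at front_) are drained onto an emergency stack first. A hedged sketch of just that copy-out step, with the page geometry faked and V8's List container simplified to std::vector:

// Sketch of the copy-out step (not V8 code); the range arithmetic
// mirrors the diff above.
#include <cstdint>
#include <vector>

struct Entry {
  intptr_t object;
  intptr_t size;
};

// head_start..head_end is the word range holding the entries to evacuate:
// rear_ up to min(front_, page body limit), as computed in the diff.
std::vector<Entry> RelocateHead(intptr_t* head_start, intptr_t* head_end) {
  std::vector<Entry> emergency_stack;
  emergency_stack.reserve(static_cast<size_t>(head_end - head_start) / 2);
  while (head_start != head_end) {
    intptr_t size = *(head_start++);    // ascending reads see the size
    intptr_t object = *(head_start++);  // word first, then the object word
    emergency_stack.push_back(Entry{object, size});
  }
  return emergency_stack;  // caller then bumps rear_ up to head_end
}

int main() {
  // Two (size, object) pairs laid out as a downward insert would leave them.
  intptr_t words[4] = {24, 0x2000, 16, 0x1000};
  std::vector<Entry> moved = RelocateHead(words, words + 4);
  return moved.size() == 2 ? 0 : 1;
}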
 void Heap::Scavenge() {
 #ifdef DEBUG
   if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
 #endif
 
   gc_state_ = SCAVENGE;
 
   // Implements Cheney's copying algorithm
   LOG(isolate_, ResourceEvent("scavenge", "begin"));
 
(...skipping 28 matching lines...)
   // We treat the top of the to space as a queue of addresses of
   // promoted objects. The addresses of newly promoted and unswept
   // objects lie between a 'front' mark and a 'rear' mark that is
   // updated as a side effect of promoting an object.
   //
   // There is guaranteed to be enough room at the top of the to space
   // for the addresses of promoted objects: every object promoted
   // frees up its size in bytes from the top of the new space, and
   // objects are at least one pointer in size.
   Address new_space_front = new_space_.ToSpaceStart();
-  promotion_queue_.Initialize(new_space_.ToSpaceEnd());
+  promotion_queue_.Initialize();
 
 #ifdef DEBUG
   store_buffer()->Clean();
 #endif
 
   ScavengeVisitor scavenge_visitor(this);
   // Copy roots.
   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
 
(...skipping 19 matching lines...)
   // Scavenge object reachable from the global contexts list directly.
   scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
 
   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
   isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
       &IsUnscavengedHeapObject);
   isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
       &scavenge_visitor);
   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
 
-
   UpdateNewSpaceReferencesInExternalStringTable(
       &UpdateNewSpaceReferenceInExternalStringTableEntry);
 
+  promotion_queue_.Destroy();
+
   LiveObjectList::UpdateReferencesForScavengeGC();
   isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
   incremental_marking()->UpdateMarkingDequeAfterScavenge();
 
   ASSERT(new_space_front == new_space_.top());
 
   // Set age mark.
   new_space_.set_age_mark(new_space_.top());
 
   new_space_.LowerInlineAllocationLimit(
(...skipping 386 matching lines...)
 
         if (object_contents == POINTER_OBJECT) {
           heap->promotion_queue()->insert(target, object_size);
         }
 
         heap->tracer()->increment_promoted_objects_size(object_size);
         return;
       }
     }
     MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
+    heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
     Object* result = allocation->ToObjectUnchecked();
 
     *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
     return;
   }
 
 
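The new SetNewLimit() call is the hook that detects the overlap: each time AllocateRaw() bumps the to-space allocation top, the queue can compare the new top against rear_ and fall back to RelocateQueueHead() before any entry is overwritten. The companion logic lives in src/heap.h and is not shown in this file; the sketch below models only the overlap test, with a hypothetical guard flag standing in for the queue's page-tracking state.

// Sketch of the overlap test only (not the actual heap.h change).
#include <cstdint>
#include <cstdio>

struct PromotionQueueState {
  intptr_t* rear;  // low end of the live entries (queue grows downward)
  bool guard;      // set once allocation has reached the queue's head page
};

// Called after the to-space top moves. Returns true when head entries must
// first be moved to the emergency stack to avoid being overwritten.
bool MustRelocateHead(const PromotionQueueState& q, intptr_t* new_top) {
  if (!q.guard) return false;  // allocation is still below the head page
  return new_top > q.rear;     // top has climbed into live queue entries
}

int main() {
  intptr_t page[16];
  PromotionQueueState q = {page + 12, true};
  std::printf("%d\n", MustRelocateHead(q, page + 8));   // 0: no overlap yet
  std::printf("%d\n", MustRelocateHead(q, page + 14));  // 1: relocate first
  return 0;
}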
   static inline void EvacuateJSFunction(Map* map,
                                         HeapObject** slot,
                                         HeapObject* object) {
(...skipping 4914 matching lines...)
   isolate_->heap()->store_buffer()->Compact();
   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
     next = chunk->next_chunk();
     isolate_->memory_allocator()->Free(chunk);
   }
   chunks_queued_for_free_ = NULL;
 }
 
 } }  // namespace v8::internal
