Chromium Code Reviews

Side by Side Diff: src/heap.cc

Issue 8477030: Ensure that promotion queue does not overlap with objects relocated to ToSpace. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 9 years, 1 month ago
1 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 2 // Redistribution and use in source and binary forms, with or without
3 3 // modification, are permitted provided that the following conditions are
4 4 // met:
5 5 //
6 6 // * Redistributions of source code must retain the above copyright
7 7 // notice, this list of conditions and the following disclaimer.
8 8 // * Redistributions in binary form must reproduce the above
9 9 // copyright notice, this list of conditions and the following
10 10 // disclaimer in the documentation and/or other materials provided
(...skipping 967 matching lines...)
978 978 current_page_->set_scan_on_scavenge(true);
979 979 ASSERT(start_of_current_page_ != store_buffer_->Top());
980 980 store_buffer_->SetTop(start_of_current_page_);
981 981 }
982 982 } else {
983 983 UNREACHABLE();
984 984 }
985 985 }
986 986
987 987
988 void PromotionQueue::SetNewLimit(Address limit) {
989 if (emergency_stack_ != NULL) return;
990
991 limit_ = reinterpret_cast<intptr_t*>(limit);
992
993 Page* queue_page = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
994 Page* limit_page = Page::FromAllocationTop(limit);
995
996 if (queue_page != limit_page || limit_ <= rear_) {
997 return;
998 }
999
1000 RelocateQueueHead();
1001 }
1002
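Note (not part of the patch): SetNewLimit above bails out unless the new allocation top has reached the same page as the queue head (rear_) and moved past it; only then could freshly allocated objects overwrite pending queue entries. A minimal standalone sketch of that guard follows, with hypothetical names and an assumed page size; PageStart is an illustrative stand-in for V8's Page::FromAllocationTop, not its actual behavior.

  #include <cstdint>

  typedef uintptr_t Address;
  static const uintptr_t kPageSize = 1 << 20;  // assumption, for illustration only

  // Hypothetical stand-in for Page::FromAllocationTop: map an address
  // to the start of its page.
  static Address PageStart(Address a) { return a & ~(kPageSize - 1); }

  // Relocation is needed only when the allocation limit sits on the same
  // page as the queue head and extends above it (to-space allocation
  // grows upward, the queue grows downward from ToSpaceEnd).
  static bool NeedsRelocation(Address rear, Address new_limit) {
    if (PageStart(rear) != PageStart(new_limit)) return false;
    return new_limit > rear;
  }

  int main() {
    Address rear = 3 * kPageSize + 512;
    return NeedsRelocation(rear, rear + 64) ? 0 : 1;  // same page, past head
  }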
1003
1004 void PromotionQueue::RelocateQueueHead() {
1005 ASSERT(emergency_stack_ == NULL);
1006
1007 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1008 intptr_t* head_start = rear_;
1009 intptr_t* head_end =
1010 Min(front_, reinterpret_cast<intptr_t*>(p->body_limit()));
1011
1012 int entries_count = (head_end - head_start) / kEntrySizeInWords;
1013
1014 emergency_stack_ = new List<Entry>(2 * entries_count);
1015
1016 while (head_start != head_end) {
1017 int size = *(head_start++);
1018 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1019 emergency_stack_->Add(Entry(obj, size));
1020 }
1021 rear_ = head_end;
1022
1023 ASSERT(emergency_stack_->length() > 0);
1024 }
1025
1026
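Note (not part of the patch): the loop in RelocateQueueHead decodes two-word entries, size first and object second, in ascending address order. That decode order implies insertion writes with pre-decrement (object pointer, then size), so an entry reads as [size, object] in memory. A simplified standalone model of that layout, with hypothetical names (PromotionQueueModel is not a V8 type):

  #include <cassert>
  #include <cstdint>

  struct PromotionQueueModel {
    intptr_t* front_;  // read end; starts at the top of the backing region
    intptr_t* rear_;   // write end; moves downward on insert

    void insert(intptr_t object, intptr_t size) {
      *(--rear_) = object;  // higher address of the pair
      *(--rear_) = size;    // lower address: entry reads as [size, object]
    }
    void remove(intptr_t* object, intptr_t* size) {
      *object = *(--front_);
      *size = *(--front_);
    }
    bool is_empty() const { return front_ == rear_; }
  };

  int main() {
    intptr_t backing[8];
    PromotionQueueModel q = { backing + 8, backing + 8 };
    q.insert(0x1000, 16);
    intptr_t obj, size;
    q.remove(&obj, &size);
    assert(obj == 0x1000 && size == 16 && q.is_empty());
  }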
988 1027 void Heap::Scavenge() {
989 1028 #ifdef DEBUG
990 1029 if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
991 1030 #endif
992 1031
993 1032 gc_state_ = SCAVENGE;
994 1033
995 1034 // Implements Cheney's copying algorithm
996 1035 LOG(isolate_, ResourceEvent("scavenge", "begin"));
997 1036
(...skipping 28 matching lines...)
1026 1065 // We treat the top of the to space as a queue of addresses of
1027 1066 // promoted objects. The addresses of newly promoted and unswept
1028 1067 // objects lie between a 'front' mark and a 'rear' mark that is
1029 1068 // updated as a side effect of promoting an object.
1030 1069 //
1031 1070 // There is guaranteed to be enough room at the top of the to space
1032 1071 // for the addresses of promoted objects: every object promoted
1033 1072 // frees up its size in bytes from the top of the new space, and
1034 1073 // objects are at least one pointer in size.
1035 1074 Address new_space_front = new_space_.ToSpaceStart();
1036 promotion_queue_.Initialize(new_space_.ToSpaceEnd());
1075 promotion_queue_.Initialize(new_space());
1037 1076
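Note (not part of the patch): the comment block above describes the shared-semispace layout this change hardens. Allocation fills to-space upward from ToSpaceStart while queue entries grow downward from ToSpaceEnd, so the two can collide when new space is nearly full; the new SetNewLimit/RelocateQueueHead pair exists to detect and defuse exactly that collision. Initialize now receives the NewSpace itself (the old line passed ToSpaceEnd directly), presumably so the queue can consult the space's current layout. Roughly:

  ToSpaceStart                                                ToSpaceEnd
  | relocated objects ---> top          rear_ <--- [size,obj] entries | front_
                            ^ collision risk when these two meet ^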
1038 1077 #ifdef DEBUG
1039 1078 store_buffer()->Clean();
1040 1079 #endif
1041 1080
1042 1081 ScavengeVisitor scavenge_visitor(this);
1043 1082 // Copy roots.
1044 1083 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1045 1084
1046 1085 // Copy objects reachable from the old generation.
(...skipping 19 matching lines...)
1066 1105 // Scavenge object reachable from the global contexts list directly.
1067 1106 scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
1068 1107
1069 1108 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1070 1109 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1071 1110 &IsUnscavengedHeapObject);
1072 1111 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1073 1112 &scavenge_visitor);
1074 1113 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1075 1114
1076
1077 1115 UpdateNewSpaceReferencesInExternalStringTable(
1078 1116 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1079 1117
1118 promotion_queue_.Destroy();
1119
1080 1120 LiveObjectList::UpdateReferencesForScavengeGC();
1081 1121 isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1082 1122 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1083 1123
1084 1124 ASSERT(new_space_front == new_space_.top());
1085 1125
1086 1126 // Set age mark.
1087 1127 new_space_.set_age_mark(new_space_.top());
1088 1128
1089 1129 new_space_.LowerInlineAllocationLimit(
(...skipping 386 matching lines...)
1476 1516
1477 1517 if (object_contents == POINTER_OBJECT) {
1478 1518 heap->promotion_queue()->insert(target, object_size);
1479 1519 }
1480 1520
1481 1521 heap->tracer()->increment_promoted_objects_size(object_size);
1482 1522 return;
1483 1523 }
1484 1524 }
1485 1525 MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
1526 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1486 1527 Object* result = allocation->ToObjectUnchecked();
1487 1528
1488 1529 *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
1489 1530 return;
1490 1531 }
1491 1532
1492 1533
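Note (not part of the patch): the new line 1526 above is the other half of the fix. When promotion fails and the object is instead copied within new space, AllocateRaw advances the to-space top; telling the promotion queue about the new top lets it move its head to the emergency stack before the relocated object can overwrite pending entries. A schematic of the call pattern, with hypothetical names (NewSpaceModel and the bump-pointer AllocateRaw are illustrative, not V8's API, and the same-page check is omitted for brevity):

  #include <cstdint>

  typedef uintptr_t Address;

  struct NewSpaceModel {
    Address top_;  // bump-pointer allocation grows upward
    Address AllocateRaw(int size) {
      Address result = top_;
      top_ += size;
      return result;
    }
    Address top() const { return top_; }
  };

  struct PromotionQueueSketch {
    Address rear_;
    // Called after every in-new-space relocation: if the new top ran
    // into the queue head, move pending entries out of the way first.
    void SetNewLimit(Address limit) {
      if (limit > rear_) RelocateQueueHead();
    }
    void RelocateQueueHead() { /* copy head entries to an emergency stack */ }
  };

  // Pattern used at the call site above: allocate, then publish the new top.
  Address RelocateIntoToSpace(NewSpaceModel* space, PromotionQueueSketch* q,
                              int object_size) {
    Address result = space->AllocateRaw(object_size);
    q->SetNewLimit(space->top());
    return result;
  }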
1493 1534 static inline void EvacuateJSFunction(Map* map,
1494 1535 HeapObject** slot,
1495 1536 HeapObject* object) {
(...skipping 4914 matching lines...)
6410 6451 isolate_->heap()->store_buffer()->Compact();
6411 6452 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
6412 6453 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6413 6454 next = chunk->next_chunk();
6414 6455 isolate_->memory_allocator()->Free(chunk);
6415 6456 }
6416 6457 chunks_queued_for_free_ = NULL;
6417 6458 }
6418 6459
6419 6460 } } // namespace v8::internal
