Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(6)

Side by Side Diff: src/mark-compact.h

Issue 7945009: Merge experimental/gc branch to the bleeding_edge. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 9 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/log.cc ('k') | src/mark-compact.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

28 #ifndef V8_MARK_COMPACT_H_ 28 #ifndef V8_MARK_COMPACT_H_
29 #define V8_MARK_COMPACT_H_ 29 #define V8_MARK_COMPACT_H_
30 30
31 #include "compiler-intrinsics.h"
31 #include "spaces.h" 32 #include "spaces.h"
32 33
33 namespace v8 { 34 namespace v8 {
34 namespace internal { 35 namespace internal {
35 36
36 // Callback function, returns whether an object is alive. The heap size 37 // Callback function, returns whether an object is alive. The heap size
37 // of the object is returned in size. It optionally updates the offset 38 // of the object is returned in size. It optionally updates the offset
38 // to the first live object in the page (only used for old and map objects). 39 // to the first live object in the page (only used for old and map objects).
39 typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset); 40 typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
40 41
41 // Forward declarations. 42 // Forward declarations.
42 class CodeFlusher; 43 class CodeFlusher;
43 class GCTracer; 44 class GCTracer;
44 class MarkingVisitor; 45 class MarkingVisitor;
45 class RootMarkingVisitor; 46 class RootMarkingVisitor;
46 47
47 48
49 class Marking {
50 public:
51 explicit Marking(Heap* heap)
52 : heap_(heap) {
53 }
54
55 static inline MarkBit MarkBitFrom(Address addr);
56
57 static inline MarkBit MarkBitFrom(HeapObject* obj) {
58 return MarkBitFrom(reinterpret_cast<Address>(obj));
59 }
60
61 // Impossible markbits: 01
62 static const char* kImpossibleBitPattern;
63 static inline bool IsImpossible(MarkBit mark_bit) {
64 ASSERT(strcmp(kImpossibleBitPattern, "01") == 0);
65 return !mark_bit.Get() && mark_bit.Next().Get();
66 }
67
68 // Black markbits: 10 - this is required by the sweeper.
69 static const char* kBlackBitPattern;
70 static inline bool IsBlack(MarkBit mark_bit) {
71 ASSERT(strcmp(kBlackBitPattern, "10") == 0);
72 ASSERT(!IsImpossible(mark_bit));
73 return mark_bit.Get() && !mark_bit.Next().Get();
74 }
75
76 // White markbits: 00 - this is required by the mark bit clearer.
77 static const char* kWhiteBitPattern;
78 static inline bool IsWhite(MarkBit mark_bit) {
79 ASSERT(strcmp(kWhiteBitPattern, "00") == 0);
80 ASSERT(!IsImpossible(mark_bit));
81 return !mark_bit.Get();
82 }
83
84 // Grey markbits: 11
85 static const char* kGreyBitPattern;
86 static inline bool IsGrey(MarkBit mark_bit) {
87 ASSERT(strcmp(kGreyBitPattern, "11") == 0);
88 ASSERT(!IsImpossible(mark_bit));
89 return mark_bit.Get() && mark_bit.Next().Get();
90 }
91
92 static inline void MarkBlack(MarkBit mark_bit) {
93 mark_bit.Set();
94 mark_bit.Next().Clear();
95 ASSERT(Marking::IsBlack(mark_bit));
96 }
97
98 static inline void BlackToGrey(MarkBit markbit) {
99 ASSERT(IsBlack(markbit));
100 markbit.Next().Set();
101 ASSERT(IsGrey(markbit));
102 }
103
104 static inline void WhiteToGrey(MarkBit markbit) {
105 ASSERT(IsWhite(markbit));
106 markbit.Set();
107 markbit.Next().Set();
108 ASSERT(IsGrey(markbit));
109 }
110
111 static inline void GreyToBlack(MarkBit markbit) {
112 ASSERT(IsGrey(markbit));
113 markbit.Next().Clear();
114 ASSERT(IsBlack(markbit));
115 }
116
117 static inline void BlackToGrey(HeapObject* obj) {
118 ASSERT(obj->Size() >= 2 * kPointerSize);
119 BlackToGrey(MarkBitFrom(obj));
120 }
121
122 static inline void AnyToGrey(MarkBit markbit) {
123 markbit.Set();
124 markbit.Next().Set();
125 ASSERT(IsGrey(markbit));
126 }
127
128 // Returns true if the the object whose mark is transferred is marked black.
129 bool TransferMark(Address old_start, Address new_start);
130
131 #ifdef DEBUG
132 enum ObjectColor {
133 BLACK_OBJECT,
134 WHITE_OBJECT,
135 GREY_OBJECT,
136 IMPOSSIBLE_COLOR
137 };
138
139 static const char* ColorName(ObjectColor color) {
140 switch (color) {
141 case BLACK_OBJECT: return "black";
142 case WHITE_OBJECT: return "white";
143 case GREY_OBJECT: return "grey";
144 case IMPOSSIBLE_COLOR: return "impossible";
145 }
146 return "error";
147 }
148
149 static ObjectColor Color(HeapObject* obj) {
150 return Color(Marking::MarkBitFrom(obj));
151 }
152
153 static ObjectColor Color(MarkBit mark_bit) {
154 if (IsBlack(mark_bit)) return BLACK_OBJECT;
155 if (IsWhite(mark_bit)) return WHITE_OBJECT;
156 if (IsGrey(mark_bit)) return GREY_OBJECT;
157 UNREACHABLE();
158 return IMPOSSIBLE_COLOR;
159 }
160 #endif
161
162 // Returns true if the transferred color is black.
163 INLINE(static bool TransferColor(HeapObject* from,
164 HeapObject* to)) {
165 MarkBit from_mark_bit = MarkBitFrom(from);
166 MarkBit to_mark_bit = MarkBitFrom(to);
167 bool is_black = false;
168 if (from_mark_bit.Get()) {
169 to_mark_bit.Set();
170 is_black = true; // Looks black so far.
171 }
172 if (from_mark_bit.Next().Get()) {
173 to_mark_bit.Next().Set();
174 is_black = false; // Was actually gray.
175 }
176 ASSERT(Color(from) == Color(to));
177 ASSERT(is_black == (Color(to) == BLACK_OBJECT));
178 return is_black;
179 }
180
181 private:
182 Heap* heap_;
183 };
184
48 // ---------------------------------------------------------------------------- 185 // ----------------------------------------------------------------------------
49 // Marking stack for tracing live objects. 186 // Marking deque for tracing live objects.
50 187
51 class MarkingStack { 188 class MarkingDeque {
52 public: 189 public:
53 MarkingStack() : low_(NULL), top_(NULL), high_(NULL), overflowed_(false) { } 190 MarkingDeque()
191 : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }
54 192
55 void Initialize(Address low, Address high) { 193 void Initialize(Address low, Address high) {
56 top_ = low_ = reinterpret_cast<HeapObject**>(low); 194 HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
57 high_ = reinterpret_cast<HeapObject**>(high); 195 HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
196 array_ = obj_low;
197 mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
198 top_ = bottom_ = 0;
58 overflowed_ = false; 199 overflowed_ = false;
59 } 200 }
60 201
61 bool is_full() const { return top_ >= high_; } 202 inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
62 203
63 bool is_empty() const { return top_ <= low_; } 204 inline bool IsEmpty() { return top_ == bottom_; }
64 205
65 bool overflowed() const { return overflowed_; } 206 bool overflowed() const { return overflowed_; }
66 207
67 void clear_overflowed() { overflowed_ = false; } 208 void ClearOverflowed() { overflowed_ = false; }
209
210 void SetOverflowed() { overflowed_ = true; }
68 211
69 // Push the (marked) object on the marking stack if there is room, 212 // Push the (marked) object on the marking stack if there is room,
70 // otherwise mark the object as overflowed and wait for a rescan of the 213 // otherwise mark the object as overflowed and wait for a rescan of the
71 // heap. 214 // heap.
72 void Push(HeapObject* object) { 215 inline void PushBlack(HeapObject* object) {
73 CHECK(object->IsHeapObject()); 216 ASSERT(object->IsHeapObject());
74 if (is_full()) { 217 if (IsFull()) {
75 object->SetOverflow(); 218 Marking::BlackToGrey(object);
76 overflowed_ = true; 219 SetOverflowed();
77 } else { 220 } else {
78 *(top_++) = object; 221 array_[top_] = object;
79 } 222 top_ = ((top_ + 1) & mask_);
80 } 223 }
81 224 }
82 HeapObject* Pop() { 225
83 ASSERT(!is_empty()); 226 inline void PushGrey(HeapObject* object) {
84 HeapObject* object = *(--top_); 227 ASSERT(object->IsHeapObject());
85 CHECK(object->IsHeapObject()); 228 if (IsFull()) {
229 ASSERT(Marking::IsGrey(Marking::MarkBitFrom(object)));
230 SetOverflowed();
231 } else {
232 array_[top_] = object;
233 top_ = ((top_ + 1) & mask_);
234 }
235 }
236
237 inline HeapObject* Pop() {
238 ASSERT(!IsEmpty());
239 top_ = ((top_ - 1) & mask_);
240 HeapObject* object = array_[top_];
241 ASSERT(object->IsHeapObject());
86 return object; 242 return object;
87 } 243 }
88 244
245 inline void UnshiftGrey(HeapObject* object) {
246 ASSERT(object->IsHeapObject());
247 if (IsFull()) {
248 ASSERT(Marking::IsGrey(Marking::MarkBitFrom(object)));
249 SetOverflowed();
250 } else {
251 bottom_ = ((bottom_ - 1) & mask_);
252 array_[bottom_] = object;
253 }
254 }
255
256 HeapObject** array() { return array_; }
257 int bottom() { return bottom_; }
258 int top() { return top_; }
259 int mask() { return mask_; }
260 void set_top(int top) { top_ = top; }
261
89 private: 262 private:
90 HeapObject** low_; 263 HeapObject** array_;
91 HeapObject** top_; 264 // array_[(top - 1) & mask_] is the top element in the deque. The Deque is
92 HeapObject** high_; 265 // empty when top_ == bottom_. It is full when top_ + 1 == bottom
266 // (mod mask + 1).
267 int top_;
268 int bottom_;
269 int mask_;
93 bool overflowed_; 270 bool overflowed_;
94 271
95 DISALLOW_COPY_AND_ASSIGN(MarkingStack); 272 DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
273 };
274
275
276 class SlotsBufferAllocator {
277 public:
278 SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
279 void DeallocateBuffer(SlotsBuffer* buffer);
280
281 void DeallocateChain(SlotsBuffer** buffer_address);
282 };
283
284
285 // SlotsBuffer records a sequence of slots that has to be updated
286 // after live objects were relocated from evacuation candidates.
287 // All slots are either untyped or typed:
288 // - Untyped slots are expected to contain a tagged object pointer.
289 // They are recorded by an address.
290 // - Typed slots are expected to contain an encoded pointer to a heap
291 // object where the way of encoding depends on the type of the slot.
292 // They are recorded as a pair (SlotType, slot address).
293 // We assume that zero-page is never mapped this allows us to distinguish
294 // untyped slots from typed slots during iteration by a simple comparison:
295 // if element of slots buffer is less than NUMBER_OF_SLOT_TYPES then it
296 // is the first element of typed slot's pair.
297 class SlotsBuffer {
298 public:
299 typedef Object** ObjectSlot;
300
301 explicit SlotsBuffer(SlotsBuffer* next_buffer)
302 : idx_(0), chain_length_(1), next_(next_buffer) {
303 if (next_ != NULL) {
304 chain_length_ = next_->chain_length_ + 1;
305 }
306 }
307
308 ~SlotsBuffer() {
309 }
310
311 void Add(ObjectSlot slot) {
312 ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
313 slots_[idx_++] = slot;
314 }
315
316 enum SlotType {
317 RELOCATED_CODE_OBJECT,
318 CODE_TARGET_SLOT,
319 CODE_ENTRY_SLOT,
320 DEBUG_TARGET_SLOT,
321 JS_RETURN_SLOT,
322 NUMBER_OF_SLOT_TYPES
323 };
324
325 void UpdateSlots(Heap* heap);
326
327 SlotsBuffer* next() { return next_; }
328
329 static int SizeOfChain(SlotsBuffer* buffer) {
330 if (buffer == NULL) return 0;
331 return static_cast<int>(buffer->idx_ +
332 (buffer->chain_length_ - 1) * kNumberOfElements);
333 }
334
335 inline bool IsFull() {
336 return idx_ == kNumberOfElements;
337 }
338
339 inline bool HasSpaceForTypedSlot() {
340 return idx_ < kNumberOfElements - 1;
341 }
342
343 static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer) {
344 while (buffer != NULL) {
345 buffer->UpdateSlots(heap);
346 buffer = buffer->next();
347 }
348 }
349
350 enum AdditionMode {
351 FAIL_ON_OVERFLOW,
352 IGNORE_OVERFLOW
353 };
354
355 static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
356 return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
357 }
358
359 static bool AddTo(SlotsBufferAllocator* allocator,
360 SlotsBuffer** buffer_address,
361 ObjectSlot slot,
362 AdditionMode mode) {
363 SlotsBuffer* buffer = *buffer_address;
364 if (buffer == NULL || buffer->IsFull()) {
365 if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
366 allocator->DeallocateChain(buffer_address);
367 return false;
368 }
369 buffer = allocator->AllocateBuffer(buffer);
370 *buffer_address = buffer;
371 }
372 buffer->Add(slot);
373 return true;
374 }
375
376 static bool IsTypedSlot(ObjectSlot slot);
377
378 static bool AddTo(SlotsBufferAllocator* allocator,
379 SlotsBuffer** buffer_address,
380 SlotType type,
381 Address addr,
382 AdditionMode mode);
383
384 static const int kNumberOfElements = 1021;
385
386 private:
387 static const int kChainLengthThreshold = 6;
388
389 intptr_t idx_;
390 intptr_t chain_length_;
391 SlotsBuffer* next_;
392 ObjectSlot slots_[kNumberOfElements];
96 }; 393 };
97 394
98 395
99 // ------------------------------------------------------------------------- 396 // -------------------------------------------------------------------------
100 // Mark-Compact collector 397 // Mark-Compact collector
101
102 class OverflowedObjectsScanner;
103
104 class MarkCompactCollector { 398 class MarkCompactCollector {
105 public: 399 public:
106 // Type of functions to compute forwarding addresses of objects in 400 // Type of functions to compute forwarding addresses of objects in
107 // compacted spaces. Given an object and its size, return a (non-failure) 401 // compacted spaces. Given an object and its size, return a (non-failure)
108 // Object* that will be the object after forwarding. There is a separate 402 // Object* that will be the object after forwarding. There is a separate
109 // allocation function for each (compactable) space based on the location 403 // allocation function for each (compactable) space based on the location
110 // of the object before compaction. 404 // of the object before compaction.
111 typedef MaybeObject* (*AllocationFunction)(Heap* heap, 405 typedef MaybeObject* (*AllocationFunction)(Heap* heap,
112 HeapObject* object, 406 HeapObject* object,
113 int object_size); 407 int object_size);
(...skipping 13 matching lines...) Expand all
127 int* offset); 421 int* offset);
128 422
129 // Type of functions to process non-live objects. 423 // Type of functions to process non-live objects.
130 typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate); 424 typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate);
131 425
132 // Pointer to member function, used in IterateLiveObjects. 426 // Pointer to member function, used in IterateLiveObjects.
133 typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj); 427 typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);
134 428
135 // Set the global force_compaction flag, it must be called before Prepare 429 // Set the global force_compaction flag, it must be called before Prepare
136 // to take effect. 430 // to take effect.
137 void SetForceCompaction(bool value) { 431 inline void SetFlags(int flags);
138 force_compaction_ = value; 432
433 inline bool PreciseSweepingRequired() {
434 return sweep_precisely_;
139 } 435 }
140 436
437 static void Initialize();
141 438
142 static void Initialize(); 439 void CollectEvacuationCandidates(PagedSpace* space);
440
441 void AddEvacuationCandidate(Page* p);
143 442
144 // Prepares for GC by resetting relocation info in old and map spaces and 443 // Prepares for GC by resetting relocation info in old and map spaces and
145 // choosing spaces to compact. 444 // choosing spaces to compact.
146 void Prepare(GCTracer* tracer); 445 void Prepare(GCTracer* tracer);
147 446
148 // Performs a global garbage collection. 447 // Performs a global garbage collection.
149 void CollectGarbage(); 448 void CollectGarbage();
150 449
151 // True if the last full GC performed heap compaction. 450 bool StartCompaction();
152 bool HasCompacted() { return compacting_collection_; }
153
154 // True after the Prepare phase if the compaction is taking place.
155 bool IsCompacting() {
156 #ifdef DEBUG
157 // For the purposes of asserts we don't want this to keep returning true
158 // after the collection is completed.
159 return state_ != IDLE && compacting_collection_;
160 #else
161 return compacting_collection_;
162 #endif
163 }
164
165 // The count of the number of objects left marked at the end of the last
166 // completed full GC (expected to be zero).
167 int previous_marked_count() { return previous_marked_count_; }
168 451
169 // During a full GC, there is a stack-allocated GCTracer that is used for 452 // During a full GC, there is a stack-allocated GCTracer that is used for
170 // bookkeeping information. Return a pointer to that tracer. 453 // bookkeeping information. Return a pointer to that tracer.
171 GCTracer* tracer() { return tracer_; } 454 GCTracer* tracer() { return tracer_; }
172 455
173 #ifdef DEBUG 456 #ifdef DEBUG
174 // Checks whether performing mark-compact collection. 457 // Checks whether performing mark-compact collection.
175 bool in_use() { return state_ > PREPARE_GC; } 458 bool in_use() { return state_ > PREPARE_GC; }
176 bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; } 459 bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
177 #endif 460 #endif
178 461
179 // Determine type of object and emit deletion log event. 462 // Determine type of object and emit deletion log event.
180 static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate); 463 static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
181 464
182 // Returns size of a possibly marked object.
183 static int SizeOfMarkedObject(HeapObject* obj);
184
185 // Distinguishable invalid map encodings (for single word and multiple words) 465 // Distinguishable invalid map encodings (for single word and multiple words)
186 // that indicate free regions. 466 // that indicate free regions.
187 static const uint32_t kSingleFreeEncoding = 0; 467 static const uint32_t kSingleFreeEncoding = 0;
188 static const uint32_t kMultiFreeEncoding = 1; 468 static const uint32_t kMultiFreeEncoding = 1;
189 469
470 static inline bool IsMarked(Object* obj);
471
190 inline Heap* heap() const { return heap_; } 472 inline Heap* heap() const { return heap_; }
191 473
192 CodeFlusher* code_flusher() { return code_flusher_; } 474 CodeFlusher* code_flusher() { return code_flusher_; }
193 inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; } 475 inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
194 void EnableCodeFlushing(bool enable); 476 void EnableCodeFlushing(bool enable);
195 477
478 enum SweeperType {
479 CONSERVATIVE,
480 LAZY_CONSERVATIVE,
481 PRECISE
482 };
483
484 #ifdef DEBUG
485 void VerifyMarkbitsAreClean();
486 static void VerifyMarkbitsAreClean(PagedSpace* space);
487 static void VerifyMarkbitsAreClean(NewSpace* space);
488 #endif
489
490 // Sweep a single page from the given space conservatively.
491 // Return a number of reclaimed bytes.
492 static intptr_t SweepConservatively(PagedSpace* space, Page* p);
493
494 INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
495 return Page::FromAddress(reinterpret_cast<Address>(anchor))->
496 ShouldSkipEvacuationSlotRecording();
497 }
498
499 INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
500 return Page::FromAddress(reinterpret_cast<Address>(host))->
501 ShouldSkipEvacuationSlotRecording();
502 }
503
504 INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
505 return Page::FromAddress(reinterpret_cast<Address>(obj))->
506 IsEvacuationCandidate();
507 }
508
509 void EvictEvacuationCandidate(Page* page) {
510 if (FLAG_trace_fragmentation) {
511 PrintF("Page %p is too popular. Disabling evacuation.\n",
512 reinterpret_cast<void*>(page));
513 }
514
515 // TODO(gc) If all evacuation candidates are too popular we
516 // should stop slots recording entirely.
517 page->ClearEvacuationCandidate();
518
519 // We were not collecting slots on this page that point
520 // to other evacuation candidates thus we have to
521 // rescan the page after evacuation to discover and update all
522 // pointers to evacuated objects.
523 if (page->owner()->identity() == OLD_DATA_SPACE) {
524 evacuation_candidates_.RemoveElement(page);
525 } else {
526 page->SetFlag(Page::RESCAN_ON_EVACUATION);
527 }
528 }
529
530 void RecordRelocSlot(RelocInfo* rinfo, Code* target);
531 void RecordCodeEntrySlot(Address slot, Code* target);
532
533 INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));
534
535 void MigrateObject(Address dst,
536 Address src,
537 int size,
538 AllocationSpace to_old_space);
539
540 bool TryPromoteObject(HeapObject* object, int object_size);
541
196 inline Object* encountered_weak_maps() { return encountered_weak_maps_; } 542 inline Object* encountered_weak_maps() { return encountered_weak_maps_; }
197 inline void set_encountered_weak_maps(Object* weak_map) { 543 inline void set_encountered_weak_maps(Object* weak_map) {
198 encountered_weak_maps_ = weak_map; 544 encountered_weak_maps_ = weak_map;
199 } 545 }
200 546
201 private: 547 private:
202 MarkCompactCollector(); 548 MarkCompactCollector();
203 ~MarkCompactCollector(); 549 ~MarkCompactCollector();
204 550
205 #ifdef DEBUG 551 #ifdef DEBUG
206 enum CollectorState { 552 enum CollectorState {
207 IDLE, 553 IDLE,
208 PREPARE_GC, 554 PREPARE_GC,
209 MARK_LIVE_OBJECTS, 555 MARK_LIVE_OBJECTS,
210 SWEEP_SPACES, 556 SWEEP_SPACES,
211 ENCODE_FORWARDING_ADDRESSES, 557 ENCODE_FORWARDING_ADDRESSES,
212 UPDATE_POINTERS, 558 UPDATE_POINTERS,
213 RELOCATE_OBJECTS 559 RELOCATE_OBJECTS
214 }; 560 };
215 561
216 // The current stage of the collector. 562 // The current stage of the collector.
217 CollectorState state_; 563 CollectorState state_;
218 #endif 564 #endif
219 565
220 // Global flag that forces a compaction. 566 // Global flag that forces sweeping to be precise, so we can traverse the
221 bool force_compaction_; 567 // heap.
568 bool sweep_precisely_;
222 569
223 // Global flag indicating whether spaces were compacted on the last GC. 570 // True if we are collecting slots to perform evacuation from evacuation
224 bool compacting_collection_; 571 // candidates.
572 bool compacting_;
225 573
226 // Global flag indicating whether spaces will be compacted on the next GC. 574 bool collect_maps_;
227 bool compact_on_next_gc_;
228
229 // The number of objects left marked at the end of the last completed full
230 // GC (expected to be zero).
231 int previous_marked_count_;
232 575
233 // A pointer to the current stack-allocated GC tracer object during a full 576 // A pointer to the current stack-allocated GC tracer object during a full
234 // collection (NULL before and after). 577 // collection (NULL before and after).
235 GCTracer* tracer_; 578 GCTracer* tracer_;
236 579
580 SlotsBufferAllocator slots_buffer_allocator_;
581
582 SlotsBuffer* migration_slots_buffer_;
583
237 // Finishes GC, performs heap verification if enabled. 584 // Finishes GC, performs heap verification if enabled.
238 void Finish(); 585 void Finish();
239 586
240 // ----------------------------------------------------------------------- 587 // -----------------------------------------------------------------------
241 // Phase 1: Marking live objects. 588 // Phase 1: Marking live objects.
242 // 589 //
243 // Before: The heap has been prepared for garbage collection by 590 // Before: The heap has been prepared for garbage collection by
244 // MarkCompactCollector::Prepare() and is otherwise in its 591 // MarkCompactCollector::Prepare() and is otherwise in its
245 // normal state. 592 // normal state.
246 // 593 //
247 // After: Live objects are marked and non-live objects are unmarked. 594 // After: Live objects are marked and non-live objects are unmarked.
248 595
249 596
250 friend class RootMarkingVisitor; 597 friend class RootMarkingVisitor;
251 friend class MarkingVisitor; 598 friend class MarkingVisitor;
252 friend class StaticMarkingVisitor; 599 friend class StaticMarkingVisitor;
253 friend class CodeMarkingVisitor; 600 friend class CodeMarkingVisitor;
254 friend class SharedFunctionInfoMarkingVisitor; 601 friend class SharedFunctionInfoMarkingVisitor;
255 602
256 void PrepareForCodeFlushing(); 603 void PrepareForCodeFlushing();
257 604
258 // Marking operations for objects reachable from roots. 605 // Marking operations for objects reachable from roots.
259 void MarkLiveObjects(); 606 void MarkLiveObjects();
260 607
261 void MarkUnmarkedObject(HeapObject* obj); 608 void AfterMarking();
262 609
263 inline void MarkObject(HeapObject* obj) { 610 INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
264 if (!obj->IsMarked()) MarkUnmarkedObject(obj);
265 }
266 611
267 inline void SetMark(HeapObject* obj); 612 INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
613
614 void ProcessNewlyMarkedObject(HeapObject* obj);
268 615
269 // Creates back pointers for all map transitions, stores them in 616 // Creates back pointers for all map transitions, stores them in
270 // the prototype field. The original prototype pointers are restored 617 // the prototype field. The original prototype pointers are restored
271 // in ClearNonLiveTransitions(). All JSObject maps 618 // in ClearNonLiveTransitions(). All JSObject maps
272 // connected by map transitions have the same prototype object, which 619 // connected by map transitions have the same prototype object, which
273 // is why we can use this field temporarily for back pointers. 620 // is why we can use this field temporarily for back pointers.
274 void CreateBackPointers(); 621 void CreateBackPointers();
275 622
276 // Mark a Map and its DescriptorArray together, skipping transitions. 623 // Mark a Map and its DescriptorArray together, skipping transitions.
277 void MarkMapContents(Map* map); 624 void MarkMapContents(Map* map);
(...skipping 13 matching lines...) Expand all
291 // Mark objects in implicit references groups if their parent object 638 // Mark objects in implicit references groups if their parent object
292 // is marked. 639 // is marked.
293 void MarkImplicitRefGroups(); 640 void MarkImplicitRefGroups();
294 641
295 // Mark all objects which are reachable due to host application 642 // Mark all objects which are reachable due to host application
296 // logic like object groups or implicit references' groups. 643 // logic like object groups or implicit references' groups.
297 void ProcessExternalMarking(); 644 void ProcessExternalMarking();
298 645
299 // Mark objects reachable (transitively) from objects in the marking stack 646 // Mark objects reachable (transitively) from objects in the marking stack
300 // or overflowed in the heap. 647 // or overflowed in the heap.
301 void ProcessMarkingStack(); 648 void ProcessMarkingDeque();
302 649
303 // Mark objects reachable (transitively) from objects in the marking 650 // Mark objects reachable (transitively) from objects in the marking
304 // stack. This function empties the marking stack, but may leave 651 // stack. This function empties the marking stack, but may leave
305 // overflowed objects in the heap, in which case the marking stack's 652 // overflowed objects in the heap, in which case the marking stack's
306 // overflow flag will be set. 653 // overflow flag will be set.
307 void EmptyMarkingStack(); 654 void EmptyMarkingDeque();
308 655
309 // Refill the marking stack with overflowed objects from the heap. This 656 // Refill the marking stack with overflowed objects from the heap. This
310 // function either leaves the marking stack full or clears the overflow 657 // function either leaves the marking stack full or clears the overflow
311 // flag on the marking stack. 658 // flag on the marking stack.
312 void RefillMarkingStack(); 659 void RefillMarkingDeque();
313 660
314 // After reachable maps have been marked process per context object 661 // After reachable maps have been marked process per context object
315 // literal map caches removing unmarked entries. 662 // literal map caches removing unmarked entries.
316 void ProcessMapCaches(); 663 void ProcessMapCaches();
317 664
318 // Callback function for telling whether the object *p is an unmarked 665 // Callback function for telling whether the object *p is an unmarked
319 // heap object. 666 // heap object.
320 static bool IsUnmarkedHeapObject(Object** p); 667 static bool IsUnmarkedHeapObject(Object** p);
321 668
322 #ifdef DEBUG 669 #ifdef DEBUG
323 void UpdateLiveObjectCount(HeapObject* obj); 670 void UpdateLiveObjectCount(HeapObject* obj);
324 #endif 671 #endif
325 672
326 // We sweep the large object space in the same way whether we are
327 // compacting or not, because the large object space is never compacted.
328 void SweepLargeObjectSpace();
329
330 // Test whether a (possibly marked) object is a Map.
331 static inline bool SafeIsMap(HeapObject* object);
332
333 // Map transitions from a live map to a dead map must be killed. 673 // Map transitions from a live map to a dead map must be killed.
334 // We replace them with a null descriptor, with the same key. 674 // We replace them with a null descriptor, with the same key.
335 void ClearNonLiveTransitions(); 675 void ClearNonLiveTransitions();
336 676
677 // Marking detaches initial maps from SharedFunctionInfo objects
678 // to make this reference weak. We need to reattach initial maps
679 // back after collection. This is either done during
680 // ClearNonLiveTransitions pass or by calling this function.
681 void ReattachInitialMaps();
682
337 // Mark all values associated with reachable keys in weak maps encountered 683 // Mark all values associated with reachable keys in weak maps encountered
338 // so far. This might push new object or even new weak maps onto the 684 // so far. This might push new object or even new weak maps onto the
339 // marking stack. 685 // marking stack.
340 void ProcessWeakMaps(); 686 void ProcessWeakMaps();
341 687
342 // After all reachable objects have been marked those weak map entries 688 // After all reachable objects have been marked those weak map entries
343 // with an unreachable key are removed from all encountered weak maps. 689 // with an unreachable key are removed from all encountered weak maps.
344 // The linked list of all encountered weak maps is destroyed. 690 // The linked list of all encountered weak maps is destroyed.
345 void ClearWeakMaps(); 691 void ClearWeakMaps();
346 692
347 // ----------------------------------------------------------------------- 693 // -----------------------------------------------------------------------
348 // Phase 2: Sweeping to clear mark bits and free non-live objects for 694 // Phase 2: Sweeping to clear mark bits and free non-live objects for
349 // a non-compacting collection, or else computing and encoding 695 // a non-compacting collection.
350 // forwarding addresses for a compacting collection.
351 // 696 //
352 // Before: Live objects are marked and non-live objects are unmarked. 697 // Before: Live objects are marked and non-live objects are unmarked.
353 // 698 //
354 // After: (Non-compacting collection.) Live objects are unmarked, 699 // After: Live objects are unmarked, non-live regions have been added to
355 // non-live regions have been added to their space's free 700 // their space's free list. Active eden semispace is compacted by
356 // list. 701 // evacuation.
357 // 702 //
358 // After: (Compacting collection.) The forwarding address of live
359 // objects in the paged spaces is encoded in their map word
360 // along with their (non-forwarded) map pointer.
361 //
362 // The forwarding address of live objects in the new space is
363 // written to their map word's offset in the inactive
364 // semispace.
365 //
366 // Bookkeeping data is written to the page header of
367 // eached paged-space page that contains live objects after
368 // compaction:
369 //
370 // The allocation watermark field is used to track the
371 // relocation top address, the address of the first word
372 // after the end of the last live object in the page after
373 // compaction.
374 //
375 // The Page::mc_page_index field contains the zero-based index of the
376 // page in its space. This word is only used for map space pages, in
377 // order to encode the map addresses in 21 bits to free 11
378 // bits per map word for the forwarding address.
379 //
380 // The Page::mc_first_forwarded field contains the (nonencoded)
381 // forwarding address of the first live object in the page.
382 //
383 // In both the new space and the paged spaces, a linked list
384 // of live regions is constructructed (linked through
385 // pointers in the non-live region immediately following each
386 // live region) to speed further passes of the collector.
387
388 // Encodes forwarding addresses of objects in compactable parts of the
389 // heap.
390 void EncodeForwardingAddresses();
391
392 // Encodes the forwarding addresses of objects in new space.
393 void EncodeForwardingAddressesInNewSpace();
394
395 // Function template to encode the forwarding addresses of objects in
396 // paged spaces, parameterized by allocation and non-live processing
397 // functions.
398 template<AllocationFunction Alloc, ProcessNonLiveFunction ProcessNonLive>
399 void EncodeForwardingAddressesInPagedSpace(PagedSpace* space);
400
401 // Iterates live objects in a space, passes live objects
402 // to a callback function which returns the heap size of the object.
403 // Returns the number of live objects iterated.
404 int IterateLiveObjects(NewSpace* space, LiveObjectCallback size_f);
405 int IterateLiveObjects(PagedSpace* space, LiveObjectCallback size_f);
406
407 // Iterates the live objects between a range of addresses, returning the
408 // number of live objects.
409 int IterateLiveObjectsInRange(Address start, Address end,
410 LiveObjectCallback size_func);
411 703
412 // If we are not compacting the heap, we simply sweep the spaces except 704 // If we are not compacting the heap, we simply sweep the spaces except
413 // for the large object space, clearing mark bits and adding unmarked 705 // for the large object space, clearing mark bits and adding unmarked
414 // regions to each space's free list. 706 // regions to each space's free list.
415 void SweepSpaces(); 707 void SweepSpaces();
416 708
417 // ----------------------------------------------------------------------- 709 void EvacuateNewSpace();
418 // Phase 3: Updating pointers in live objects.
419 //
420 // Before: Same as after phase 2 (compacting collection).
421 //
422 // After: All pointers in live objects, including encoded map
423 // pointers, are updated to point to their target's new
424 // location.
425 710
426 friend class UpdatingVisitor; // helper for updating visited objects 711 void EvacuateLiveObjectsFromPage(Page* p);
427 712
428 // Updates pointers in all spaces. 713 void EvacuatePages();
429 void UpdatePointers();
430 714
431 // Updates pointers in an object in new space. 715 void EvacuateNewSpaceAndCandidates();
432 // Returns the heap size of the object.
433 int UpdatePointersInNewObject(HeapObject* obj);
434 716
435 // Updates pointers in an object in old spaces. 717 void SweepSpace(PagedSpace* space, SweeperType sweeper);
436 // Returns the heap size of the object.
437 int UpdatePointersInOldObject(HeapObject* obj);
438 718
439 // Calculates the forwarding address of an object in an old space.
440 static Address GetForwardingAddressInOldSpace(HeapObject* obj);
441
442 // -----------------------------------------------------------------------
443 // Phase 4: Relocating objects.
444 //
445 // Before: Pointers to live objects are updated to point to their
446 // target's new location.
447 //
448 // After: Objects have been moved to their new addresses.
449
450 // Relocates objects in all spaces.
451 void RelocateObjects();
452
453 // Converts a code object's inline target to addresses, convention from
454 // address to target happens in the marking phase.
455 int ConvertCodeICTargetToAddress(HeapObject* obj);
456
457 // Relocate a map object.
458 int RelocateMapObject(HeapObject* obj);
459
460 // Relocates an old object.
461 int RelocateOldPointerObject(HeapObject* obj);
462 int RelocateOldDataObject(HeapObject* obj);
463
464 // Relocate a property cell object.
465 int RelocateCellObject(HeapObject* obj);
466
467 // Helper function.
468 inline int RelocateOldNonCodeObject(HeapObject* obj,
469 PagedSpace* space);
470
471 // Relocates an object in the code space.
472 int RelocateCodeObject(HeapObject* obj);
473
474 // Copy a new object.
475 int RelocateNewObject(HeapObject* obj);
476 719
477 #ifdef DEBUG 720 #ifdef DEBUG
478 // ----------------------------------------------------------------------- 721 // -----------------------------------------------------------------------
479 // Debugging variables, functions and classes 722 // Debugging variables, functions and classes
480 // Counters used for debugging the marking phase of mark-compact or 723 // Counters used for debugging the marking phase of mark-compact or
481 // mark-sweep collection. 724 // mark-sweep collection.
482 725
483 // Size of live objects in Heap::to_space_. 726 // Size of live objects in Heap::to_space_.
484 int live_young_objects_size_; 727 int live_young_objects_size_;
485 728
(...skipping 19 matching lines...) Expand all
505 int live_bytes_; 748 int live_bytes_;
506 749
507 friend class MarkObjectVisitor; 750 friend class MarkObjectVisitor;
508 static void VisitObject(HeapObject* obj); 751 static void VisitObject(HeapObject* obj);
509 752
510 friend class UnmarkObjectVisitor; 753 friend class UnmarkObjectVisitor;
511 static void UnmarkObject(HeapObject* obj); 754 static void UnmarkObject(HeapObject* obj);
512 #endif 755 #endif
513 756
514 Heap* heap_; 757 Heap* heap_;
515 MarkingStack marking_stack_; 758 MarkingDeque marking_deque_;
516 CodeFlusher* code_flusher_; 759 CodeFlusher* code_flusher_;
517 Object* encountered_weak_maps_; 760 Object* encountered_weak_maps_;
518 761
762 List<Page*> evacuation_candidates_;
763
519 friend class Heap; 764 friend class Heap;
520 friend class OverflowedObjectsScanner;
521 }; 765 };
522 766
523 767
524 } } // namespace v8::internal 768 } } // namespace v8::internal
525 769
526 #endif // V8_MARK_COMPACT_H_ 770 #endif // V8_MARK_COMPACT_H_
OLDNEW
« no previous file with comments | « src/log.cc ('k') | src/mark-compact.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698