Chromium Code Reviews

Side by Side Diff: src/mark-compact.h

Issue 8139027: Version 3.6.5 (Closed) Base URL: http://v8.googlecode.com/svn/trunk/
Patch Set: '' Created 9 years, 2 months ago
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution. 11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its 12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived 13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission. 14 // from this software without specific prior written permission.
15 // 15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #ifndef V8_MARK_COMPACT_H_ 28 #ifndef V8_MARK_COMPACT_H_
29 #define V8_MARK_COMPACT_H_ 29 #define V8_MARK_COMPACT_H_
30 30
31 #include "compiler-intrinsics.h"
31 #include "spaces.h" 32 #include "spaces.h"
32 33
33 namespace v8 { 34 namespace v8 {
34 namespace internal { 35 namespace internal {
35 36
36 // Callback function that returns whether an object is alive. The heap size 37 // Callback function that returns whether an object is alive. The heap size
37 // of the object is returned in size. It optionally updates the offset 38 // of the object is returned in size. It optionally updates the offset
38 // to the first live object in the page (only used for old and map objects). 39 // to the first live object in the page (only used for old and map objects).
39 typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset); 40 typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
40 41
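The IsAliveFunction protocol above reports liveness through the return value and the object's heap size through the *size out-parameter. A minimal standalone sketch of a conforming callback, using stub types rather than V8's real HeapObject (the real implementations live in mark-compact.cc):

// Stub stand-ins, only to show the callback shape; V8's real HeapObject and
// callbacks differ (see mark-compact.cc).
struct HeapObject {
  bool marked;
  int size;
  int Size() const { return size; }
};

typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// A conforming callback: liveness via the return value, heap size via *size.
// This variant does not update *offset.
static bool IsAliveMarked(HeapObject* obj, int* size, int* offset) {
  (void)offset;
  if (!obj->marked) return false;
  *size = obj->Size();
  return true;
}

int main() {
  HeapObject obj = {true, 16};
  int size = 0;
  int offset = 0;
  IsAliveFunction is_alive = &IsAliveMarked;
  return is_alive(&obj, &size, &offset) ? 0 : 1;
}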
41 // Forward declarations. 42 // Forward declarations.
42 class CodeFlusher; 43 class CodeFlusher;
43 class GCTracer; 44 class GCTracer;
44 class MarkingVisitor; 45 class MarkingVisitor;
45 class RootMarkingVisitor; 46 class RootMarkingVisitor;
46 47
47 48
49 class Marking {
50 public:
51 explicit Marking(Heap* heap)
52 : heap_(heap) {
53 }
54
55 static inline MarkBit MarkBitFrom(Address addr);
56
57 static inline MarkBit MarkBitFrom(HeapObject* obj) {
58 return MarkBitFrom(reinterpret_cast<Address>(obj));
59 }
60
61 // Impossible markbits: 01
62 static const char* kImpossibleBitPattern;
63 static inline bool IsImpossible(MarkBit mark_bit) {
64 ASSERT(strcmp(kImpossibleBitPattern, "01") == 0);
65 return !mark_bit.Get() && mark_bit.Next().Get();
66 }
67
68 // Black markbits: 10 - this is required by the sweeper.
69 static const char* kBlackBitPattern;
70 static inline bool IsBlack(MarkBit mark_bit) {
71 ASSERT(strcmp(kBlackBitPattern, "10") == 0);
72 ASSERT(!IsImpossible(mark_bit));
73 return mark_bit.Get() && !mark_bit.Next().Get();
74 }
75
76 // White markbits: 00 - this is required by the mark bit clearer.
77 static const char* kWhiteBitPattern;
78 static inline bool IsWhite(MarkBit mark_bit) {
79 ASSERT(strcmp(kWhiteBitPattern, "00") == 0);
80 ASSERT(!IsImpossible(mark_bit));
81 return !mark_bit.Get();
82 }
83
84 // Grey markbits: 11
85 static const char* kGreyBitPattern;
86 static inline bool IsGrey(MarkBit mark_bit) {
87 ASSERT(strcmp(kGreyBitPattern, "11") == 0);
88 ASSERT(!IsImpossible(mark_bit));
89 return mark_bit.Get() && mark_bit.Next().Get();
90 }
91
92 static inline void MarkBlack(MarkBit mark_bit) {
93 mark_bit.Set();
94 mark_bit.Next().Clear();
95 ASSERT(Marking::IsBlack(mark_bit));
96 }
97
98 static inline void BlackToGrey(MarkBit markbit) {
99 ASSERT(IsBlack(markbit));
100 markbit.Next().Set();
101 ASSERT(IsGrey(markbit));
102 }
103
104 static inline void WhiteToGrey(MarkBit markbit) {
105 ASSERT(IsWhite(markbit));
106 markbit.Set();
107 markbit.Next().Set();
108 ASSERT(IsGrey(markbit));
109 }
110
111 static inline void GreyToBlack(MarkBit markbit) {
112 ASSERT(IsGrey(markbit));
113 markbit.Next().Clear();
114 ASSERT(IsBlack(markbit));
115 }
116
117 static inline void BlackToGrey(HeapObject* obj) {
118 ASSERT(obj->Size() >= 2 * kPointerSize);
119 BlackToGrey(MarkBitFrom(obj));
120 }
121
122 static inline void AnyToGrey(MarkBit markbit) {
123 markbit.Set();
124 markbit.Next().Set();
125 ASSERT(IsGrey(markbit));
126 }
127
128 // Returns true if the object whose mark is transferred is marked black.
129 bool TransferMark(Address old_start, Address new_start);
130
131 #ifdef DEBUG
132 enum ObjectColor {
133 BLACK_OBJECT,
134 WHITE_OBJECT,
135 GREY_OBJECT,
136 IMPOSSIBLE_COLOR
137 };
138
139 static const char* ColorName(ObjectColor color) {
140 switch (color) {
141 case BLACK_OBJECT: return "black";
142 case WHITE_OBJECT: return "white";
143 case GREY_OBJECT: return "grey";
144 case IMPOSSIBLE_COLOR: return "impossible";
145 }
146 return "error";
147 }
148
149 static ObjectColor Color(HeapObject* obj) {
150 return Color(Marking::MarkBitFrom(obj));
151 }
152
153 static ObjectColor Color(MarkBit mark_bit) {
154 if (IsBlack(mark_bit)) return BLACK_OBJECT;
155 if (IsWhite(mark_bit)) return WHITE_OBJECT;
156 if (IsGrey(mark_bit)) return GREY_OBJECT;
157 UNREACHABLE();
158 return IMPOSSIBLE_COLOR;
159 }
160 #endif
161
162 // Returns true if the transferred color is black.
163 INLINE(static bool TransferColor(HeapObject* from,
164 HeapObject* to)) {
165 MarkBit from_mark_bit = MarkBitFrom(from);
166 MarkBit to_mark_bit = MarkBitFrom(to);
167 bool is_black = false;
168 if (from_mark_bit.Get()) {
169 to_mark_bit.Set();
170 is_black = true; // Looks black so far.
171 }
172 if (from_mark_bit.Next().Get()) {
173 to_mark_bit.Next().Set();
174 is_black = false; // Was actually grey.
175 }
176 ASSERT(Color(from) == Color(to));
177 ASSERT(is_black == (Color(to) == BLACK_OBJECT));
178 return is_black;
179 }
180
181 private:
182 Heap* heap_;
183 };
184
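The Marking class above packs an object's tri-color state into two adjacent mark bits: white is 00, black is 10, grey is 11, and 01 never occurs. A minimal standalone sketch of that encoding and the legal transitions, with TwoBits standing in for a MarkBit and its Next() neighbor (illustrative only, not V8 code):

#include <cassert>

// Stand-in for a MarkBit and its Next() neighbor.
struct TwoBits {
  bool first;   // mark_bit.Get()
  bool second;  // mark_bit.Next().Get()
};

bool IsWhite(TwoBits b) { return !b.first; }              // 00
bool IsBlack(TwoBits b) { return b.first && !b.second; }  // 10
bool IsGrey(TwoBits b)  { return b.first && b.second; }   // 11

void WhiteToGrey(TwoBits* b) { assert(IsWhite(*b)); b->first = b->second = true; }
void GreyToBlack(TwoBits* b) { assert(IsGrey(*b)); b->second = false; }

int main() {
  TwoBits bits = {false, false};  // newly allocated object: white
  WhiteToGrey(&bits);             // discovered: goes on the marking deque
  GreyToBlack(&bits);             // body scanned: its children are now grey
  assert(IsBlack(bits));
}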
48 // ---------------------------------------------------------------------------- 185 // ----------------------------------------------------------------------------
49 // Marking stack for tracing live objects. 186 // Marking deque for tracing live objects.
50 187
51 class MarkingStack { 188 class MarkingDeque {
52 public: 189 public:
53 MarkingStack() : low_(NULL), top_(NULL), high_(NULL), overflowed_(false) { } 190 MarkingDeque()
191 : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }
54 192
55 void Initialize(Address low, Address high) { 193 void Initialize(Address low, Address high) {
56 top_ = low_ = reinterpret_cast<HeapObject**>(low); 194 HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
57 high_ = reinterpret_cast<HeapObject**>(high); 195 HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
196 array_ = obj_low;
197 mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
198 top_ = bottom_ = 0;
58 overflowed_ = false; 199 overflowed_ = false;
59 } 200 }
60 201
61 bool is_full() const { return top_ >= high_; } 202 inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
62 203
63 bool is_empty() const { return top_ <= low_; } 204 inline bool IsEmpty() { return top_ == bottom_; }
64 205
65 bool overflowed() const { return overflowed_; } 206 bool overflowed() const { return overflowed_; }
66 207
67 void clear_overflowed() { overflowed_ = false; } 208 void ClearOverflowed() { overflowed_ = false; }
209
210 void SetOverflowed() { overflowed_ = true; }
68 211
69 // Push the (marked) object on the marking stack if there is room; 212 // Push the (marked) object on the marking stack if there is room;
70 // otherwise mark the object as overflowed and wait for a rescan of the 213 // otherwise mark the object as overflowed and wait for a rescan of the
71 // heap. 214 // heap.
72 void Push(HeapObject* object) { 215 inline void PushBlack(HeapObject* object) {
73 CHECK(object->IsHeapObject()); 216 ASSERT(object->IsHeapObject());
74 if (is_full()) { 217 if (IsFull()) {
75 object->SetOverflow(); 218 Marking::BlackToGrey(object);
76 overflowed_ = true; 219 MemoryChunk::IncrementLiveBytes(object->address(), -object->Size());
220 SetOverflowed();
77 } else { 221 } else {
78 *(top_++) = object; 222 array_[top_] = object;
79 } 223 top_ = ((top_ + 1) & mask_);
80 } 224 }
81 225 }
82 HeapObject* Pop() { 226
83 ASSERT(!is_empty()); 227 inline void PushGrey(HeapObject* object) {
84 HeapObject* object = *(--top_); 228 ASSERT(object->IsHeapObject());
85 CHECK(object->IsHeapObject()); 229 if (IsFull()) {
230 ASSERT(Marking::IsGrey(Marking::MarkBitFrom(object)));
231 SetOverflowed();
232 } else {
233 array_[top_] = object;
234 top_ = ((top_ + 1) & mask_);
235 }
236 }
237
238 inline HeapObject* Pop() {
239 ASSERT(!IsEmpty());
240 top_ = ((top_ - 1) & mask_);
241 HeapObject* object = array_[top_];
242 ASSERT(object->IsHeapObject());
86 return object; 243 return object;
87 } 244 }
88 245
246 inline void UnshiftGrey(HeapObject* object) {
247 ASSERT(object->IsHeapObject());
248 if (IsFull()) {
249 ASSERT(Marking::IsGrey(Marking::MarkBitFrom(object)));
250 SetOverflowed();
251 } else {
252 bottom_ = ((bottom_ - 1) & mask_);
253 array_[bottom_] = object;
254 }
255 }
256
257 HeapObject** array() { return array_; }
258 int bottom() { return bottom_; }
259 int top() { return top_; }
260 int mask() { return mask_; }
261 void set_top(int top) { top_ = top; }
262
89 private: 263 private:
90 HeapObject** low_; 264 HeapObject** array_;
91 HeapObject** top_; 265 // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
92 HeapObject** high_; 266 // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
267 // (mod mask_ + 1).
268 int top_;
269 int bottom_;
270 int mask_;
93 bool overflowed_; 271 bool overflowed_;
94 272
95 DISALLOW_COPY_AND_ASSIGN(MarkingStack); 273 DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
274 };
275
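The MarkingDeque above is a fixed-capacity ring buffer: Initialize rounds the capacity down to a power of two so that wrap-around is a single "& mask_", and one slot is left unused so that full (top_ + 1 == bottom_, mod capacity) is distinguishable from empty (top_ == bottom_). A standalone sketch of the index arithmetic, assuming a power-of-two capacity (illustrative, not V8 code):

#include <cassert>

// kCapacity must be a power of two so that "& kMask" replaces "% kCapacity".
template <typename T, int kCapacity>
class RingDeque {
 public:
  bool IsEmpty() const { return top_ == bottom_; }
  // One slot stays unused so a full deque is distinguishable from an empty
  // one: full means top_ + 1 == bottom_ (mod kCapacity).
  bool IsFull() const { return ((top_ + 1) & kMask) == bottom_; }

  void PushBack(T value) {   // cf. PushBlack / PushGrey
    assert(!IsFull());
    array_[top_] = value;
    top_ = (top_ + 1) & kMask;
  }
  T PopBack() {              // cf. Pop
    assert(!IsEmpty());
    top_ = (top_ + kCapacity - 1) & kMask;  // decrement modulo kCapacity
    return array_[top_];
  }
  void PushFront(T value) {  // cf. UnshiftGrey
    assert(!IsFull());
    bottom_ = (bottom_ + kCapacity - 1) & kMask;
    array_[bottom_] = value;
  }

 private:
  static const int kMask = kCapacity - 1;
  T array_[kCapacity];
  int top_ = 0;
  int bottom_ = 0;
};

int main() {
  RingDeque<int, 8> deque;   // 8 slots, so at most 7 elements at once
  deque.PushBack(1);
  deque.PushFront(2);
  assert(deque.PopBack() == 1);
  assert(deque.PopBack() == 2);
  assert(deque.IsEmpty());
}

Unlike this sketch, the real deque never rejects a push outright: PushBlack demotes the object back to grey (and subtracts its live bytes) and sets the overflow flag, so RefillMarkingDeque can rediscover the object later.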
276
277 class SlotsBufferAllocator {
278 public:
279 SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
280 void DeallocateBuffer(SlotsBuffer* buffer);
281
282 void DeallocateChain(SlotsBuffer** buffer_address);
283 };
284
285
286 // SlotsBuffer records a sequence of slots that has to be updated
287 // after live objects were relocated from evacuation candidates.
288 // All slots are either untyped or typed:
289 // - Untyped slots are expected to contain a tagged object pointer.
290 // They are recorded by an address.
291 // - Typed slots are expected to contain an encoded pointer to a heap
292 // object, where the encoding depends on the type of the slot.
293 // They are recorded as a pair (SlotType, slot address).
294 // We assume that the zero page is never mapped; this allows us to
295 // distinguish untyped slots from typed slots during iteration by a simple
296 // comparison: if an element of the slots buffer is less than
297 // NUMBER_OF_SLOT_TYPES, then it is the first element of a typed slot's pair.
298 class SlotsBuffer {
299 public:
300 typedef Object** ObjectSlot;
301
302 explicit SlotsBuffer(SlotsBuffer* next_buffer)
303 : idx_(0), chain_length_(1), next_(next_buffer) {
304 if (next_ != NULL) {
305 chain_length_ = next_->chain_length_ + 1;
306 }
307 }
308
309 ~SlotsBuffer() {
310 }
311
312 void Add(ObjectSlot slot) {
313 ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
314 slots_[idx_++] = slot;
315 }
316
317 enum SlotType {
318 RELOCATED_CODE_OBJECT,
319 CODE_TARGET_SLOT,
320 CODE_ENTRY_SLOT,
321 DEBUG_TARGET_SLOT,
322 JS_RETURN_SLOT,
323 NUMBER_OF_SLOT_TYPES
324 };
325
326 void UpdateSlots(Heap* heap);
327
328 void UpdateSlotsWithFilter(Heap* heap);
329
330 SlotsBuffer* next() { return next_; }
331
332 static int SizeOfChain(SlotsBuffer* buffer) {
333 if (buffer == NULL) return 0;
334 return static_cast<int>(buffer->idx_ +
335 (buffer->chain_length_ - 1) * kNumberOfElements);
336 }
337
338 inline bool IsFull() {
339 return idx_ == kNumberOfElements;
340 }
341
342 inline bool HasSpaceForTypedSlot() {
343 return idx_ < kNumberOfElements - 1;
344 }
345
346 static void UpdateSlotsRecordedIn(Heap* heap,
347 SlotsBuffer* buffer,
348 bool code_slots_filtering_required) {
349 while (buffer != NULL) {
350 if (code_slots_filtering_required) {
351 buffer->UpdateSlotsWithFilter(heap);
352 } else {
353 buffer->UpdateSlots(heap);
354 }
355 buffer = buffer->next();
356 }
357 }
358
359 enum AdditionMode {
360 FAIL_ON_OVERFLOW,
361 IGNORE_OVERFLOW
362 };
363
364 static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
365 return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
366 }
367
368 static bool AddTo(SlotsBufferAllocator* allocator,
369 SlotsBuffer** buffer_address,
370 ObjectSlot slot,
371 AdditionMode mode) {
372 SlotsBuffer* buffer = *buffer_address;
373 if (buffer == NULL || buffer->IsFull()) {
374 if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
375 allocator->DeallocateChain(buffer_address);
376 return false;
377 }
378 buffer = allocator->AllocateBuffer(buffer);
379 *buffer_address = buffer;
380 }
381 buffer->Add(slot);
382 return true;
383 }
384
385 static bool IsTypedSlot(ObjectSlot slot);
386
387 static bool AddTo(SlotsBufferAllocator* allocator,
388 SlotsBuffer** buffer_address,
389 SlotType type,
390 Address addr,
391 AdditionMode mode);
392
393 static const int kNumberOfElements = 1021;
394
395 private:
396 static const int kChainLengthThreshold = 6;
397
398 intptr_t idx_;
399 intptr_t chain_length_;
400 SlotsBuffer* next_;
401 ObjectSlot slots_[kNumberOfElements];
96 }; 402 };
97 403
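SlotsBuffer grows as a singly linked chain: AddTo allocates a fresh link when the head buffer is full, and in FAIL_ON_OVERFLOW mode it gives up once the chain reaches kChainLengthThreshold links, deallocating the chain and returning false (the caller can then evict the too-popular evacuation candidate; see EvictEvacuationCandidate below). A standalone sketch of that chaining policy with untyped slots only (illustrative, not V8 code):

struct Buffer {
  static const int kNumberOfElements = 1021;   // value from the header above
  int idx = 0;
  int chain_length = 1;
  Buffer* next;
  void* slots[kNumberOfElements];

  explicit Buffer(Buffer* n) : next(n) {
    if (n != nullptr) chain_length = n->chain_length + 1;
  }
  bool IsFull() const { return idx == kNumberOfElements; }
};

enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };
static const int kChainLengthThreshold = 6;    // value from the header above

static void DeallocateChain(Buffer** head) {
  while (*head != nullptr) {
    Buffer* next = (*head)->next;
    delete *head;
    *head = next;
  }
}

// Returns false if the slot could not be recorded (chain grew too long).
static bool AddTo(Buffer** head, void* slot, AdditionMode mode) {
  Buffer* buffer = *head;
  if (buffer == nullptr || buffer->IsFull()) {
    if (mode == FAIL_ON_OVERFLOW && buffer != nullptr &&
        buffer->chain_length >= kChainLengthThreshold) {
      DeallocateChain(head);
      return false;
    }
    buffer = new Buffer(buffer);
    *head = buffer;
  }
  buffer->slots[buffer->idx++] = slot;
  return true;
}

int main() {
  Buffer* chain = nullptr;
  int dummy;
  bool ok = AddTo(&chain, &dummy, FAIL_ON_OVERFLOW);  // allocates first link
  DeallocateChain(&chain);
  return ok ? 0 : 1;
}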
98 404
99 // ------------------------------------------------------------------------- 405 // -------------------------------------------------------------------------
100 // Mark-Compact collector 406 // Mark-Compact collector
101
102 class OverflowedObjectsScanner;
103
104 class MarkCompactCollector { 407 class MarkCompactCollector {
105 public: 408 public:
106 // Type of functions to compute forwarding addresses of objects in 409 // Type of functions to compute forwarding addresses of objects in
107 // compacted spaces. Given an object and its size, return a (non-failure) 410 // compacted spaces. Given an object and its size, return a (non-failure)
108 // Object* that will be the object after forwarding. There is a separate 411 // Object* that will be the object after forwarding. There is a separate
109 // allocation function for each (compactable) space based on the location 412 // allocation function for each (compactable) space based on the location
110 // of the object before compaction. 413 // of the object before compaction.
111 typedef MaybeObject* (*AllocationFunction)(Heap* heap, 414 typedef MaybeObject* (*AllocationFunction)(Heap* heap,
112 HeapObject* object, 415 HeapObject* object,
113 int object_size); 416 int object_size);
(...skipping 13 matching lines...)
127 int* offset); 430 int* offset);
128 431
129 // Type of functions to process non-live objects. 432 // Type of functions to process non-live objects.
130 typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate); 433 typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate);
131 434
132 // Pointer to member function, used in IterateLiveObjects. 435 // Pointer to member function, used in IterateLiveObjects.
133 typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj); 436 typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);
134 437
135 // Set the global force_compaction flag; it must be called before Prepare 438 // Set the global force_compaction flag; it must be called before Prepare
136 // to take effect. 439 // to take effect.
137 void SetForceCompaction(bool value) { 440 inline void SetFlags(int flags);
138 force_compaction_ = value; 441
442 inline bool PreciseSweepingRequired() {
443 return sweep_precisely_;
139 } 444 }
140 445
446 static void Initialize();
141 447
142 static void Initialize(); 448 void CollectEvacuationCandidates(PagedSpace* space);
449
450 void AddEvacuationCandidate(Page* p);
143 451
144 // Prepares for GC by resetting relocation info in old and map spaces and 452 // Prepares for GC by resetting relocation info in old and map spaces and
145 // choosing spaces to compact. 453 // choosing spaces to compact.
146 void Prepare(GCTracer* tracer); 454 void Prepare(GCTracer* tracer);
147 455
148 // Performs a global garbage collection. 456 // Performs a global garbage collection.
149 void CollectGarbage(); 457 void CollectGarbage();
150 458
151 // True if the last full GC performed heap compaction. 459 bool StartCompaction();
152 bool HasCompacted() { return compacting_collection_; }
153 460
154 // True after the Prepare phase if the compaction is taking place. 461 void AbortCompaction();
155 bool IsCompacting() {
156 #ifdef DEBUG
157 // For the purposes of asserts we don't want this to keep returning true
158 // after the collection is completed.
159 return state_ != IDLE && compacting_collection_;
160 #else
161 return compacting_collection_;
162 #endif
163 }
164
165 // The count of the number of objects left marked at the end of the last
166 // completed full GC (expected to be zero).
167 int previous_marked_count() { return previous_marked_count_; }
168 462
169 // During a full GC, there is a stack-allocated GCTracer that is used for 463 // During a full GC, there is a stack-allocated GCTracer that is used for
170 // bookkeeping information. Return a pointer to that tracer. 464 // bookkeeping information. Return a pointer to that tracer.
171 GCTracer* tracer() { return tracer_; } 465 GCTracer* tracer() { return tracer_; }
172 466
173 #ifdef DEBUG 467 #ifdef DEBUG
174 // Checks whether a mark-compact collection is in progress. 468 // Checks whether a mark-compact collection is in progress.
175 bool in_use() { return state_ > PREPARE_GC; } 469 bool in_use() { return state_ > PREPARE_GC; }
176 bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; } 470 bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
177 #endif 471 #endif
178 472
179 // Determine type of object and emit deletion log event. 473 // Determine type of object and emit deletion log event.
180 static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate); 474 static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
181 475
182 // Returns size of a possibly marked object.
183 static int SizeOfMarkedObject(HeapObject* obj);
184
185 // Distinguishable invalid map encodings (for single word and multiple words) 476 // Distinguishable invalid map encodings (for single word and multiple words)
186 // that indicate free regions. 477 // that indicate free regions.
187 static const uint32_t kSingleFreeEncoding = 0; 478 static const uint32_t kSingleFreeEncoding = 0;
188 static const uint32_t kMultiFreeEncoding = 1; 479 static const uint32_t kMultiFreeEncoding = 1;
189 480
481 static inline bool IsMarked(Object* obj);
482
190 inline Heap* heap() const { return heap_; } 483 inline Heap* heap() const { return heap_; }
191 484
192 CodeFlusher* code_flusher() { return code_flusher_; } 485 CodeFlusher* code_flusher() { return code_flusher_; }
193 inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; } 486 inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
194 void EnableCodeFlushing(bool enable); 487 void EnableCodeFlushing(bool enable);
195 488
489 enum SweeperType {
490 CONSERVATIVE,
491 LAZY_CONSERVATIVE,
492 PRECISE
493 };
494
495 #ifdef DEBUG
496 void VerifyMarkbitsAreClean();
497 static void VerifyMarkbitsAreClean(PagedSpace* space);
498 static void VerifyMarkbitsAreClean(NewSpace* space);
499 #endif
500
501 // Sweep a single page from the given space conservatively.
502 // Return a number of reclaimed bytes.
503 static intptr_t SweepConservatively(PagedSpace* space, Page* p);
504
505 INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
506 return Page::FromAddress(reinterpret_cast<Address>(anchor))->
507 ShouldSkipEvacuationSlotRecording();
508 }
509
510 INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
511 return Page::FromAddress(reinterpret_cast<Address>(host))->
512 ShouldSkipEvacuationSlotRecording();
513 }
514
515 INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
516 return Page::FromAddress(reinterpret_cast<Address>(obj))->
517 IsEvacuationCandidate();
518 }
519
520 void EvictEvacuationCandidate(Page* page) {
521 if (FLAG_trace_fragmentation) {
522 PrintF("Page %p is too popular. Disabling evacuation.\n",
523 reinterpret_cast<void*>(page));
524 }
525
526 // TODO(gc) If all evacuation candidates are too popular we
527 // should stop slots recording entirely.
528 page->ClearEvacuationCandidate();
529
530 // We were not collecting slots on this page that point
531 // to other evacuation candidates thus we have to
532 // rescan the page after evacuation to discover and update all
533 // pointers to evacuated objects.
534 if (page->owner()->identity() == OLD_DATA_SPACE) {
535 evacuation_candidates_.RemoveElement(page);
536 } else {
537 page->SetFlag(Page::RESCAN_ON_EVACUATION);
538 }
539 }
540
541 void RecordRelocSlot(RelocInfo* rinfo, Code* target);
542 void RecordCodeEntrySlot(Address slot, Code* target);
543
544 INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));
545
546 void MigrateObject(Address dst,
547 Address src,
548 int size,
549 AllocationSpace to_old_space);
550
551 bool TryPromoteObject(HeapObject* object, int object_size);
552
196 inline Object* encountered_weak_maps() { return encountered_weak_maps_; } 553 inline Object* encountered_weak_maps() { return encountered_weak_maps_; }
197 inline void set_encountered_weak_maps(Object* weak_map) { 554 inline void set_encountered_weak_maps(Object* weak_map) {
198 encountered_weak_maps_ = weak_map; 555 encountered_weak_maps_ = weak_map;
199 } 556 }
200 557
558 void InvalidateCode(Code* code);
559
201 private: 560 private:
202 MarkCompactCollector(); 561 MarkCompactCollector();
203 ~MarkCompactCollector(); 562 ~MarkCompactCollector();
204 563
564 bool MarkInvalidatedCode();
565 void RemoveDeadInvalidatedCode();
566 void ProcessInvalidatedCode(ObjectVisitor* visitor);
567
568
205 #ifdef DEBUG 569 #ifdef DEBUG
206 enum CollectorState { 570 enum CollectorState {
207 IDLE, 571 IDLE,
208 PREPARE_GC, 572 PREPARE_GC,
209 MARK_LIVE_OBJECTS, 573 MARK_LIVE_OBJECTS,
210 SWEEP_SPACES, 574 SWEEP_SPACES,
211 ENCODE_FORWARDING_ADDRESSES, 575 ENCODE_FORWARDING_ADDRESSES,
212 UPDATE_POINTERS, 576 UPDATE_POINTERS,
213 RELOCATE_OBJECTS 577 RELOCATE_OBJECTS
214 }; 578 };
215 579
216 // The current stage of the collector. 580 // The current stage of the collector.
217 CollectorState state_; 581 CollectorState state_;
218 #endif 582 #endif
219 583
220 // Global flag that forces a compaction. 584 // Global flag that forces sweeping to be precise, so we can traverse the
221 bool force_compaction_; 585 // heap.
586 bool sweep_precisely_;
222 587
223 // Global flag indicating whether spaces were compacted on the last GC. 588 // True if we are collecting slots to perform evacuation from evacuation
224 bool compacting_collection_; 589 // candidates.
590 bool compacting_;
225 591
226 // Global flag indicating whether spaces will be compacted on the next GC. 592 bool was_marked_incrementally_;
227 bool compact_on_next_gc_;
228 593
229 // The number of objects left marked at the end of the last completed full 594 bool collect_maps_;
230 // GC (expected to be zero).
231 int previous_marked_count_;
232 595
233 // A pointer to the current stack-allocated GC tracer object during a full 596 // A pointer to the current stack-allocated GC tracer object during a full
234 // collection (NULL before and after). 597 // collection (NULL before and after).
235 GCTracer* tracer_; 598 GCTracer* tracer_;
236 599
600 SlotsBufferAllocator slots_buffer_allocator_;
601
602 SlotsBuffer* migration_slots_buffer_;
603
237 // Finishes GC, performs heap verification if enabled. 604 // Finishes GC, performs heap verification if enabled.
238 void Finish(); 605 void Finish();
239 606
240 // ----------------------------------------------------------------------- 607 // -----------------------------------------------------------------------
241 // Phase 1: Marking live objects. 608 // Phase 1: Marking live objects.
242 // 609 //
243 // Before: The heap has been prepared for garbage collection by 610 // Before: The heap has been prepared for garbage collection by
244 // MarkCompactCollector::Prepare() and is otherwise in its 611 // MarkCompactCollector::Prepare() and is otherwise in its
245 // normal state. 612 // normal state.
246 // 613 //
247 // After: Live objects are marked and non-live objects are unmarked. 614 // After: Live objects are marked and non-live objects are unmarked.
248 615
249 616
250 friend class RootMarkingVisitor; 617 friend class RootMarkingVisitor;
251 friend class MarkingVisitor; 618 friend class MarkingVisitor;
252 friend class StaticMarkingVisitor; 619 friend class StaticMarkingVisitor;
253 friend class CodeMarkingVisitor; 620 friend class CodeMarkingVisitor;
254 friend class SharedFunctionInfoMarkingVisitor; 621 friend class SharedFunctionInfoMarkingVisitor;
255 622
256 void PrepareForCodeFlushing(); 623 void PrepareForCodeFlushing();
257 624
258 // Marking operations for objects reachable from roots. 625 // Marking operations for objects reachable from roots.
259 void MarkLiveObjects(); 626 void MarkLiveObjects();
260 627
261 void MarkUnmarkedObject(HeapObject* obj); 628 void AfterMarking();
262 629
263 inline void MarkObject(HeapObject* obj) { 630 INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
264 if (!obj->IsMarked()) MarkUnmarkedObject(obj);
265 }
266 631
267 inline void SetMark(HeapObject* obj); 632 INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
633
634 void ProcessNewlyMarkedObject(HeapObject* obj);
268 635
269 // Creates back pointers for all map transitions, stores them in 636 // Creates back pointers for all map transitions, stores them in
270 // the prototype field. The original prototype pointers are restored 637 // the prototype field. The original prototype pointers are restored
271 // in ClearNonLiveTransitions(). All JSObject maps 638 // in ClearNonLiveTransitions(). All JSObject maps
272 // connected by map transitions have the same prototype object, which 639 // connected by map transitions have the same prototype object, which
273 // is why we can use this field temporarily for back pointers. 640 // is why we can use this field temporarily for back pointers.
274 void CreateBackPointers(); 641 void CreateBackPointers();
275 642
276 // Mark a Map and its DescriptorArray together, skipping transitions. 643 // Mark a Map and its DescriptorArray together, skipping transitions.
277 void MarkMapContents(Map* map); 644 void MarkMapContents(Map* map);
(...skipping 13 matching lines...)
291 // Mark objects in implicit references groups if their parent object 658 // Mark objects in implicit references groups if their parent object
292 // is marked. 659 // is marked.
293 void MarkImplicitRefGroups(); 660 void MarkImplicitRefGroups();
294 661
295 // Mark all objects which are reachable due to host application 662 // Mark all objects which are reachable due to host application
296 // logic like object groups or implicit references' groups. 663 // logic like object groups or implicit references' groups.
297 void ProcessExternalMarking(); 664 void ProcessExternalMarking();
298 665
299 // Mark objects reachable (transitively) from objects in the marking stack 666 // Mark objects reachable (transitively) from objects in the marking stack
300 // or overflowed in the heap. 667 // or overflowed in the heap.
301 void ProcessMarkingStack(); 668 void ProcessMarkingDeque();
302 669
303 // Mark objects reachable (transitively) from objects in the marking 670 // Mark objects reachable (transitively) from objects in the marking
304 // stack. This function empties the marking stack, but may leave 671 // stack. This function empties the marking stack, but may leave
305 // overflowed objects in the heap, in which case the marking stack's 672 // overflowed objects in the heap, in which case the marking stack's
306 // overflow flag will be set. 673 // overflow flag will be set.
307 void EmptyMarkingStack(); 674 void EmptyMarkingDeque();
308 675
309 // Refill the marking stack with overflowed objects from the heap. This 676 // Refill the marking stack with overflowed objects from the heap. This
310 // function either leaves the marking stack full or clears the overflow 677 // function either leaves the marking stack full or clears the overflow
311 // flag on the marking stack. 678 // flag on the marking stack.
312 void RefillMarkingStack(); 679 void RefillMarkingDeque();
313 680
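Taken together, the three comments above describe an overflow-tolerant fixpoint loop. A sketch of how the routines plausibly compose, mirroring the documented pre- and post-conditions (the actual bodies live in mark-compact.cc; this is not the real implementation):

void MarkCompactCollector::ProcessMarkingDeque() {
  // Drain the deque; pushes that do not fit set the deque's overflow flag.
  EmptyMarkingDeque();
  while (marking_deque_.overflowed()) {
    // Rescan the heap for overflowed objects: either the deque is left full
    // or the overflow flag is cleared, so this loop makes progress.
    RefillMarkingDeque();
    EmptyMarkingDeque();
  }
  // On exit the deque is empty and no overflowed grey objects remain.
}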
314 // After reachable maps have been marked process per context object 681 // After reachable maps have been marked process per context object
315 // literal map caches removing unmarked entries. 682 // literal map caches removing unmarked entries.
316 void ProcessMapCaches(); 683 void ProcessMapCaches();
317 684
318 // Callback function for telling whether the object *p is an unmarked 685 // Callback function for telling whether the object *p is an unmarked
319 // heap object. 686 // heap object.
320 static bool IsUnmarkedHeapObject(Object** p); 687 static bool IsUnmarkedHeapObject(Object** p);
321 688
322 #ifdef DEBUG 689 #ifdef DEBUG
323 void UpdateLiveObjectCount(HeapObject* obj); 690 void UpdateLiveObjectCount(HeapObject* obj);
324 #endif 691 #endif
325 692
326 // We sweep the large object space in the same way whether we are
327 // compacting or not, because the large object space is never compacted.
328 void SweepLargeObjectSpace();
329
330 // Test whether a (possibly marked) object is a Map.
331 static inline bool SafeIsMap(HeapObject* object);
332
333 // Map transitions from a live map to a dead map must be killed. 693 // Map transitions from a live map to a dead map must be killed.
334 // We replace them with a null descriptor, with the same key. 694 // We replace them with a null descriptor, with the same key.
335 void ClearNonLiveTransitions(); 695 void ClearNonLiveTransitions();
336 696
697 // Marking detaches initial maps from SharedFunctionInfo objects
698 // to make this reference weak. We need to reattach initial maps
699 // back after collection. This is either done during
700 // ClearNonLiveTransitions pass or by calling this function.
701 void ReattachInitialMaps();
702
337 // Mark all values associated with reachable keys in weak maps encountered 703 // Mark all values associated with reachable keys in weak maps encountered
338 // so far. This might push new object or even new weak maps onto the 704 // so far. This might push new object or even new weak maps onto the
339 // marking stack. 705 // marking stack.
340 void ProcessWeakMaps(); 706 void ProcessWeakMaps();
341 707
342 // After all reachable objects have been marked those weak map entries 708 // After all reachable objects have been marked those weak map entries
343 // with an unreachable key are removed from all encountered weak maps. 709 // with an unreachable key are removed from all encountered weak maps.
344 // The linked list of all encountered weak maps is destroyed. 710 // The linked list of all encountered weak maps is destroyed.
345 void ClearWeakMaps(); 711 void ClearWeakMaps();
346 712
347 // ----------------------------------------------------------------------- 713 // -----------------------------------------------------------------------
348 // Phase 2: Sweeping to clear mark bits and free non-live objects for 714 // Phase 2: Sweeping to clear mark bits and free non-live objects for
349 // a non-compacting collection, or else computing and encoding 715 // a non-compacting collection.
350 // forwarding addresses for a compacting collection.
351 // 716 //
352 // Before: Live objects are marked and non-live objects are unmarked. 717 // Before: Live objects are marked and non-live objects are unmarked.
353 // 718 //
354 // After: (Non-compacting collection.) Live objects are unmarked, 719 // After: Live objects are unmarked, non-live regions have been added to
355 // non-live regions have been added to their space's free 720 // their space's free list. Active eden semispace is compacted by
356 // list. 721 // evacuation.
357 // 722 //
358 // After: (Compacting collection.) The forwarding address of live
359 // objects in the paged spaces is encoded in their map word
360 // along with their (non-forwarded) map pointer.
361 //
362 // The forwarding address of live objects in the new space is
363 // written to their map word's offset in the inactive
364 // semispace.
365 //
366 // Bookkeeping data is written to the page header of
367 // each paged-space page that contains live objects after
368 // compaction:
369 //
370 // The allocation watermark field is used to track the
371 // relocation top address, the address of the first word
372 // after the end of the last live object in the page after
373 // compaction.
374 //
375 // The Page::mc_page_index field contains the zero-based index of the
376 // page in its space. This word is only used for map space pages, in
377 // order to encode the map addresses in 21 bits to free 11
378 // bits per map word for the forwarding address.
379 //
380 // The Page::mc_first_forwarded field contains the (nonencoded)
381 // forwarding address of the first live object in the page.
382 //
383 // In both the new space and the paged spaces, a linked list
384 // of live regions is constructructed (linked through
385 // pointers in the non-live region immediately following each
386 // live region) to speed further passes of the collector.
387
388 // Encodes forwarding addresses of objects in compactable parts of the
389 // heap.
390 void EncodeForwardingAddresses();
391
392 // Encodes the forwarding addresses of objects in new space.
393 void EncodeForwardingAddressesInNewSpace();
394
395 // Function template to encode the forwarding addresses of objects in
396 // paged spaces, parameterized by allocation and non-live processing
397 // functions.
398 template<AllocationFunction Alloc, ProcessNonLiveFunction ProcessNonLive>
399 void EncodeForwardingAddressesInPagedSpace(PagedSpace* space);
400
401 // Iterates live objects in a space, passes live objects
402 // to a callback function which returns the heap size of the object.
403 // Returns the number of live objects iterated.
404 int IterateLiveObjects(NewSpace* space, LiveObjectCallback size_f);
405 int IterateLiveObjects(PagedSpace* space, LiveObjectCallback size_f);
406
407 // Iterates the live objects between a range of addresses, returning the
408 // number of live objects.
409 int IterateLiveObjectsInRange(Address start, Address end,
410 LiveObjectCallback size_func);
411 723
412 // If we are not compacting the heap, we simply sweep the spaces except 724 // If we are not compacting the heap, we simply sweep the spaces except
413 // for the large object space, clearing mark bits and adding unmarked 725 // for the large object space, clearing mark bits and adding unmarked
414 // regions to each space's free list. 726 // regions to each space's free list.
415 void SweepSpaces(); 727 void SweepSpaces();
416 728
417 // ----------------------------------------------------------------------- 729 void EvacuateNewSpace();
418 // Phase 3: Updating pointers in live objects.
419 //
420 // Before: Same as after phase 2 (compacting collection).
421 //
422 // After: All pointers in live objects, including encoded map
423 // pointers, are updated to point to their target's new
424 // location.
425 730
426 friend class UpdatingVisitor; // helper for updating visited objects 731 void EvacuateLiveObjectsFromPage(Page* p);
427 732
428 // Updates pointers in all spaces. 733 void EvacuatePages();
429 void UpdatePointers();
430 734
431 // Updates pointers in an object in new space. 735 void EvacuateNewSpaceAndCandidates();
432 // Returns the heap size of the object.
433 int UpdatePointersInNewObject(HeapObject* obj);
434 736
435 // Updates pointers in an object in old spaces. 737 void SweepSpace(PagedSpace* space, SweeperType sweeper);
436 // Returns the heap size of the object.
437 int UpdatePointersInOldObject(HeapObject* obj);
438 738
439 // Calculates the forwarding address of an object in an old space.
440 static Address GetForwardingAddressInOldSpace(HeapObject* obj);
441
442 // -----------------------------------------------------------------------
443 // Phase 4: Relocating objects.
444 //
445 // Before: Pointers to live objects are updated to point to their
446 // target's new location.
447 //
448 // After: Objects have been moved to their new addresses.
449
450 // Relocates objects in all spaces.
451 void RelocateObjects();
452
453 // Converts a code object's inline targets to addresses; the conversion
454 // from address to target happens in the marking phase.
455 int ConvertCodeICTargetToAddress(HeapObject* obj);
456
457 // Relocate a map object.
458 int RelocateMapObject(HeapObject* obj);
459
460 // Relocates an old object.
461 int RelocateOldPointerObject(HeapObject* obj);
462 int RelocateOldDataObject(HeapObject* obj);
463
464 // Relocate a property cell object.
465 int RelocateCellObject(HeapObject* obj);
466
467 // Helper function.
468 inline int RelocateOldNonCodeObject(HeapObject* obj,
469 PagedSpace* space);
470
471 // Relocates an object in the code space.
472 int RelocateCodeObject(HeapObject* obj);
473
474 // Copy a new object.
475 int RelocateNewObject(HeapObject* obj);
476 739
477 #ifdef DEBUG 740 #ifdef DEBUG
478 // ----------------------------------------------------------------------- 741 // -----------------------------------------------------------------------
479 // Debugging variables, functions and classes 742 // Debugging variables, functions and classes
480 // Counters used for debugging the marking phase of mark-compact or 743 // Counters used for debugging the marking phase of mark-compact or
481 // mark-sweep collection. 744 // mark-sweep collection.
482 745
483 // Size of live objects in Heap::to_space_. 746 // Size of live objects in Heap::to_space_.
484 int live_young_objects_size_; 747 int live_young_objects_size_;
485 748
(...skipping 19 matching lines...)
505 int live_bytes_; 768 int live_bytes_;
506 769
507 friend class MarkObjectVisitor; 770 friend class MarkObjectVisitor;
508 static void VisitObject(HeapObject* obj); 771 static void VisitObject(HeapObject* obj);
509 772
510 friend class UnmarkObjectVisitor; 773 friend class UnmarkObjectVisitor;
511 static void UnmarkObject(HeapObject* obj); 774 static void UnmarkObject(HeapObject* obj);
512 #endif 775 #endif
513 776
514 Heap* heap_; 777 Heap* heap_;
515 MarkingStack marking_stack_; 778 MarkingDeque marking_deque_;
516 CodeFlusher* code_flusher_; 779 CodeFlusher* code_flusher_;
517 Object* encountered_weak_maps_; 780 Object* encountered_weak_maps_;
518 781
782 List<Page*> evacuation_candidates_;
783 List<Code*> invalidated_code_;
784
519 friend class Heap; 785 friend class Heap;
520 friend class OverflowedObjectsScanner;
521 }; 786 };
522 787
523 788
789 const char* AllocationSpaceName(AllocationSpace space);
790
524 } } // namespace v8::internal 791 } } // namespace v8::internal
525 792
526 #endif // V8_MARK_COMPACT_H_ 793 #endif // V8_MARK_COMPACT_H_