Chromium Code Reviews

Side by Side Diff: src/mark-compact.cc

Issue 8139027: Version 3.6.5 (Closed) Base URL: http://v8.googlecode.com/svn/trunk/
Patch Set: '' Created 9 years, 2 months ago
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution. 11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its 12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived 13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission. 14 // from this software without specific prior written permission.
15 // 15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #include "v8.h" 28 #include "v8.h"
29 29
30 #include "code-stubs.h"
30 #include "compilation-cache.h" 31 #include "compilation-cache.h"
32 #include "deoptimizer.h"
31 #include "execution.h" 33 #include "execution.h"
32 #include "heap-profiler.h"
33 #include "gdb-jit.h" 34 #include "gdb-jit.h"
34 #include "global-handles.h" 35 #include "global-handles.h"
36 #include "heap-profiler.h"
35 #include "ic-inl.h" 37 #include "ic-inl.h"
38 #include "incremental-marking.h"
36 #include "liveobjectlist-inl.h" 39 #include "liveobjectlist-inl.h"
37 #include "mark-compact.h" 40 #include "mark-compact.h"
38 #include "objects-visiting.h" 41 #include "objects-visiting.h"
42 #include "objects-visiting-inl.h"
39 #include "stub-cache.h" 43 #include "stub-cache.h"
40 44
41 namespace v8 { 45 namespace v8 {
42 namespace internal { 46 namespace internal {
43 47
48
49 const char* Marking::kWhiteBitPattern = "00";
50 const char* Marking::kBlackBitPattern = "10";
51 const char* Marking::kGreyBitPattern = "11";
52 const char* Marking::kImpossibleBitPattern = "01";
53
54
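The four bit-pattern strings above describe the two-bit color encoding used by the new collector: white "00", black "10", grey "11", with "01" never occurring. A minimal standalone sketch (not part of this patch) of that classification, reading the first bit as the object's own mark bit and the second as the adjacent one; the names below are illustrative stand-ins, not V8's MarkBit API:

#include <cassert>
#include <cstdio>

// Illustrative two-bit color encoding: first bit = mark bit, second bit = the next bit.
enum Color { WHITE, BLACK, GREY, IMPOSSIBLE };

static Color Classify(bool mark_bit, bool next_bit) {
  if (!mark_bit) return next_bit ? IMPOSSIBLE : WHITE;  // "01" should never occur
  return next_bit ? GREY : BLACK;                       // "11" is grey, "10" is black
}

int main() {
  assert(Classify(false, false) == WHITE);   // "00"
  assert(Classify(true, false) == BLACK);    // "10"
  assert(Classify(true, true) == GREY);      // "11"
  assert(Classify(false, true) == IMPOSSIBLE);
  std::printf("two-bit mark encoding checks passed\n");
  return 0;
}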
44 // ------------------------------------------------------------------------- 55 // -------------------------------------------------------------------------
45 // MarkCompactCollector 56 // MarkCompactCollector
46 57
47 MarkCompactCollector::MarkCompactCollector() : // NOLINT 58 MarkCompactCollector::MarkCompactCollector() : // NOLINT
48 #ifdef DEBUG 59 #ifdef DEBUG
49 state_(IDLE), 60 state_(IDLE),
50 #endif 61 #endif
51 force_compaction_(false), 62 sweep_precisely_(false),
52 compacting_collection_(false), 63 compacting_(false),
53 compact_on_next_gc_(false), 64 was_marked_incrementally_(false),
54 previous_marked_count_(0), 65 collect_maps_(FLAG_collect_maps),
55 tracer_(NULL), 66 tracer_(NULL),
67 migration_slots_buffer_(NULL),
56 #ifdef DEBUG 68 #ifdef DEBUG
57 live_young_objects_size_(0), 69 live_young_objects_size_(0),
58 live_old_pointer_objects_size_(0), 70 live_old_pointer_objects_size_(0),
59 live_old_data_objects_size_(0), 71 live_old_data_objects_size_(0),
60 live_code_objects_size_(0), 72 live_code_objects_size_(0),
61 live_map_objects_size_(0), 73 live_map_objects_size_(0),
62 live_cell_objects_size_(0), 74 live_cell_objects_size_(0),
63 live_lo_objects_size_(0), 75 live_lo_objects_size_(0),
64 live_bytes_(0), 76 live_bytes_(0),
65 #endif 77 #endif
66 heap_(NULL), 78 heap_(NULL),
67 code_flusher_(NULL), 79 code_flusher_(NULL),
68 encountered_weak_maps_(NULL) { } 80 encountered_weak_maps_(NULL) { }
69 81
70 82
83 #ifdef DEBUG
84 class VerifyMarkingVisitor: public ObjectVisitor {
85 public:
86 void VisitPointers(Object** start, Object** end) {
87 for (Object** current = start; current < end; current++) {
88 if ((*current)->IsHeapObject()) {
89 HeapObject* object = HeapObject::cast(*current);
90 ASSERT(HEAP->mark_compact_collector()->IsMarked(object));
91 }
92 }
93 }
94 };
95
96
97 static void VerifyMarking(Address bottom, Address top) {
98 VerifyMarkingVisitor visitor;
99 HeapObject* object;
100 Address next_object_must_be_here_or_later = bottom;
101
102 for (Address current = bottom;
103 current < top;
104 current += kPointerSize) {
105 object = HeapObject::FromAddress(current);
106 if (MarkCompactCollector::IsMarked(object)) {
107 ASSERT(current >= next_object_must_be_here_or_later);
108 object->Iterate(&visitor);
109 next_object_must_be_here_or_later = current + object->Size();
110 }
111 }
112 }
113
114
115 static void VerifyMarking(NewSpace* space) {
116 Address end = space->top();
117 NewSpacePageIterator it(space->bottom(), end);
118 // The bottom position is at the start of its page. Allows us to use
119 // page->body() as start of range on all pages.
120 ASSERT_EQ(space->bottom(),
121 NewSpacePage::FromAddress(space->bottom())->body());
122 while (it.has_next()) {
123 NewSpacePage* page = it.next();
124 Address limit = it.has_next() ? page->body_limit() : end;
125 ASSERT(limit == end || !page->Contains(end));
126 VerifyMarking(page->body(), limit);
127 }
128 }
129
130
131 static void VerifyMarking(PagedSpace* space) {
132 PageIterator it(space);
133
134 while (it.has_next()) {
135 Page* p = it.next();
136 VerifyMarking(p->ObjectAreaStart(), p->ObjectAreaEnd());
137 }
138 }
139
140
141 static void VerifyMarking(Heap* heap) {
142 VerifyMarking(heap->old_pointer_space());
143 VerifyMarking(heap->old_data_space());
144 VerifyMarking(heap->code_space());
145 VerifyMarking(heap->cell_space());
146 VerifyMarking(heap->map_space());
147 VerifyMarking(heap->new_space());
148
149 VerifyMarkingVisitor visitor;
150
151 LargeObjectIterator it(heap->lo_space());
152 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
153 if (MarkCompactCollector::IsMarked(obj)) {
154 obj->Iterate(&visitor);
155 }
156 }
157
158 heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
159 }
160
161
162 class VerifyEvacuationVisitor: public ObjectVisitor {
163 public:
164 void VisitPointers(Object** start, Object** end) {
165 for (Object** current = start; current < end; current++) {
166 if ((*current)->IsHeapObject()) {
167 HeapObject* object = HeapObject::cast(*current);
168 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
169 }
170 }
171 }
172 };
173
174
175 static void VerifyEvacuation(Address bottom, Address top) {
176 VerifyEvacuationVisitor visitor;
177 HeapObject* object;
178 Address next_object_must_be_here_or_later = bottom;
179
180 for (Address current = bottom;
181 current < top;
182 current += kPointerSize) {
183 object = HeapObject::FromAddress(current);
184 if (MarkCompactCollector::IsMarked(object)) {
185 ASSERT(current >= next_object_must_be_here_or_later);
186 object->Iterate(&visitor);
187 next_object_must_be_here_or_later = current + object->Size();
188 }
189 }
190 }
191
192
193 static void VerifyEvacuation(NewSpace* space) {
194 NewSpacePageIterator it(space->bottom(), space->top());
195 VerifyEvacuationVisitor visitor;
196
197 while (it.has_next()) {
198 NewSpacePage* page = it.next();
199 Address current = page->body();
200 Address limit = it.has_next() ? page->body_limit() : space->top();
201 ASSERT(limit == space->top() || !page->Contains(space->top()));
202 while (current < limit) {
203 HeapObject* object = HeapObject::FromAddress(current);
204 object->Iterate(&visitor);
205 current += object->Size();
206 }
207 }
208 }
209
210
211 static void VerifyEvacuation(PagedSpace* space) {
212 PageIterator it(space);
213
214 while (it.has_next()) {
215 Page* p = it.next();
216 if (p->IsEvacuationCandidate()) continue;
217 VerifyEvacuation(p->ObjectAreaStart(), p->ObjectAreaEnd());
218 }
219 }
220
221
222 static void VerifyEvacuation(Heap* heap) {
223 VerifyEvacuation(heap->old_pointer_space());
224 VerifyEvacuation(heap->old_data_space());
225 VerifyEvacuation(heap->code_space());
226 VerifyEvacuation(heap->cell_space());
227 VerifyEvacuation(heap->map_space());
228 VerifyEvacuation(heap->new_space());
229
230 VerifyEvacuationVisitor visitor;
231 heap->IterateStrongRoots(&visitor, VISIT_ALL);
232 }
233 #endif
234
235
236 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
237 p->MarkEvacuationCandidate();
238 evacuation_candidates_.Add(p);
239 }
240
241
242 bool MarkCompactCollector::StartCompaction() {
243 if (!compacting_) {
244 ASSERT(evacuation_candidates_.length() == 0);
245
246 CollectEvacuationCandidates(heap()->old_pointer_space());
247 CollectEvacuationCandidates(heap()->old_data_space());
248
249 if (FLAG_compact_code_space) {
250 CollectEvacuationCandidates(heap()->code_space());
251 }
252
253 heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
254 heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
255 heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
256
257 compacting_ = evacuation_candidates_.length() > 0;
258 }
259
260 return compacting_;
261 }
262
263
71 void MarkCompactCollector::CollectGarbage() { 264 void MarkCompactCollector::CollectGarbage() {
72 // Make sure that Prepare() has been called. The individual steps below will 265 // Make sure that Prepare() has been called. The individual steps below will
73 // update the state as they proceed. 266 // update the state as they proceed.
74 ASSERT(state_ == PREPARE_GC); 267 ASSERT(state_ == PREPARE_GC);
75 ASSERT(encountered_weak_maps_ == Smi::FromInt(0)); 268 ASSERT(encountered_weak_maps_ == Smi::FromInt(0));
76 269
77 // Prepare has selected whether to compact the old generation or not.
78 // Tell the tracer.
79 if (IsCompacting()) tracer_->set_is_compacting();
80
81 MarkLiveObjects(); 270 MarkLiveObjects();
82 271 ASSERT(heap_->incremental_marking()->IsStopped());
83 if (FLAG_collect_maps) ClearNonLiveTransitions(); 272
273 if (collect_maps_) ClearNonLiveTransitions();
84 274
85 ClearWeakMaps(); 275 ClearWeakMaps();
86 276
87 SweepLargeObjectSpace(); 277 #ifdef DEBUG
88 278 if (FLAG_verify_heap) {
89 if (IsCompacting()) { 279 VerifyMarking(heap_);
90 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT); 280 }
91 EncodeForwardingAddresses(); 281 #endif
92 282
93 heap()->MarkMapPointersAsEncoded(true); 283 SweepSpaces();
94 UpdatePointers(); 284
95 heap()->MarkMapPointersAsEncoded(false); 285 if (!collect_maps_) ReattachInitialMaps();
96 heap()->isolate()->pc_to_code_cache()->Flush(); 286
97 287 heap_->isolate()->inner_pointer_to_code_cache()->Flush();
98 RelocateObjects();
99 } else {
100 SweepSpaces();
101 heap()->isolate()->pc_to_code_cache()->Flush();
102 }
103 288
104 Finish(); 289 Finish();
105 290
106 // Save the count of marked objects remaining after the collection and
107 // null out the GC tracer.
108 previous_marked_count_ = tracer_->marked_count();
109 ASSERT(previous_marked_count_ == 0);
110 tracer_ = NULL; 291 tracer_ = NULL;
111 } 292 }
112 293
113 294
295 #ifdef DEBUG
296 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
297 PageIterator it(space);
298
299 while (it.has_next()) {
300 Page* p = it.next();
301 CHECK(p->markbits()->IsClean());
302 CHECK_EQ(0, p->LiveBytes());
303 }
304 }
305
306 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
307 NewSpacePageIterator it(space->bottom(), space->top());
308
309 while (it.has_next()) {
310 NewSpacePage* p = it.next();
311 CHECK(p->markbits()->IsClean());
312 CHECK_EQ(0, p->LiveBytes());
313 }
314 }
315
316 void MarkCompactCollector::VerifyMarkbitsAreClean() {
317 VerifyMarkbitsAreClean(heap_->old_pointer_space());
318 VerifyMarkbitsAreClean(heap_->old_data_space());
319 VerifyMarkbitsAreClean(heap_->code_space());
320 VerifyMarkbitsAreClean(heap_->cell_space());
321 VerifyMarkbitsAreClean(heap_->map_space());
322 VerifyMarkbitsAreClean(heap_->new_space());
323
324 LargeObjectIterator it(heap_->lo_space());
325 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
326 MarkBit mark_bit = Marking::MarkBitFrom(obj);
327 ASSERT(Marking::IsWhite(mark_bit));
328 }
329 }
330 #endif
331
332
333 static void ClearMarkbits(PagedSpace* space) {
334 PageIterator it(space);
335
336 while (it.has_next()) {
337 Bitmap::Clear(it.next());
338 }
339 }
340
341
342 static void ClearMarkbits(NewSpace* space) {
343 NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
344
345 while (it.has_next()) {
346 Bitmap::Clear(it.next());
347 }
348 }
349
350
351 static void ClearMarkbits(Heap* heap) {
352 ClearMarkbits(heap->code_space());
353 ClearMarkbits(heap->map_space());
354 ClearMarkbits(heap->old_pointer_space());
355 ClearMarkbits(heap->old_data_space());
356 ClearMarkbits(heap->cell_space());
357 ClearMarkbits(heap->new_space());
358
359 LargeObjectIterator it(heap->lo_space());
360 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
361 MarkBit mark_bit = Marking::MarkBitFrom(obj);
362 mark_bit.Clear();
363 mark_bit.Next().Clear();
364 }
365 }
366
367
368 bool Marking::TransferMark(Address old_start, Address new_start) {
369 // This is only used when resizing an object.
370 ASSERT(MemoryChunk::FromAddress(old_start) ==
371 MemoryChunk::FromAddress(new_start));
372
373 // If the mark doesn't move, we don't check the color of the object.
374 // It doesn't matter whether the object is black, since it hasn't changed
375 // size, so the adjustment to the live data count will be zero anyway.
376 if (old_start == new_start) return false;
377
378 MarkBit new_mark_bit = MarkBitFrom(new_start);
379 MarkBit old_mark_bit = MarkBitFrom(old_start);
380
381 #ifdef DEBUG
382 ObjectColor old_color = Color(old_mark_bit);
383 #endif
384
385 if (Marking::IsBlack(old_mark_bit)) {
386 old_mark_bit.Clear();
387 ASSERT(IsWhite(old_mark_bit));
388 Marking::MarkBlack(new_mark_bit);
389 return true;
390 } else if (Marking::IsGrey(old_mark_bit)) {
391 ASSERT(heap_->incremental_marking()->IsMarking());
392 old_mark_bit.Clear();
393 old_mark_bit.Next().Clear();
394 ASSERT(IsWhite(old_mark_bit));
395 heap_->incremental_marking()->WhiteToGreyAndPush(
396 HeapObject::FromAddress(new_start), new_mark_bit);
397 heap_->incremental_marking()->RestartIfNotMarking();
398 }
399
400 #ifdef DEBUG
401 ObjectColor new_color = Color(new_mark_bit);
402 ASSERT(new_color == old_color);
403 #endif
404
405 return false;
406 }
407
408
409 const char* AllocationSpaceName(AllocationSpace space) {
410 switch (space) {
411 case NEW_SPACE: return "NEW_SPACE";
412 case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
413 case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
414 case CODE_SPACE: return "CODE_SPACE";
415 case MAP_SPACE: return "MAP_SPACE";
416 case CELL_SPACE: return "CELL_SPACE";
417 case LO_SPACE: return "LO_SPACE";
418 default:
419 UNREACHABLE();
420 }
421
422 return NULL;
423 }
424
425
426 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
427 ASSERT(space->identity() == OLD_POINTER_SPACE ||
428 space->identity() == OLD_DATA_SPACE ||
429 space->identity() == CODE_SPACE);
430
431 PageIterator it(space);
432 int count = 0;
433 if (it.has_next()) it.next(); // Never compact the first page.
434 while (it.has_next()) {
435 Page* p = it.next();
436 if (space->IsFragmented(p)) {
437 AddEvacuationCandidate(p);
438 count++;
439 } else {
440 p->ClearEvacuationCandidate();
441 }
442 }
443
444 if (count > 0 && FLAG_trace_fragmentation) {
445 PrintF("Collected %d evacuation candidates for space %s\n",
446 count,
447 AllocationSpaceName(space->identity()));
448 }
449 }
450
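CollectEvacuationCandidates above skips the first page of each space and nominates every page the space reports as fragmented. A small sketch of that selection policy over a plain vector; the fragmentation test here is an assumed stand-in for PagedSpace::IsFragmented, which is defined elsewhere and not shown in this diff:

#include <cstdio>
#include <vector>

// Hypothetical per-page summary; V8 asks the space itself instead.
struct PageInfo {
  int free_bytes;
  int capacity_bytes;
};

// Assumed stand-in for PagedSpace::IsFragmented: treat a page as fragmented
// when more than half of its capacity sits in free-list blocks.
static bool IsFragmented(const PageInfo& p) {
  return p.free_bytes * 2 > p.capacity_bytes;
}

static std::vector<int> CollectCandidates(const std::vector<PageInfo>& pages) {
  std::vector<int> candidates;
  // Never compact the first page, mirroring the loop above.
  for (size_t i = 1; i < pages.size(); i++) {
    if (IsFragmented(pages[i])) candidates.push_back(static_cast<int>(i));
  }
  return candidates;
}

int main() {
  std::vector<PageInfo> pages = {{900, 1000}, {100, 1000}, {700, 1000}};
  std::vector<int> c = CollectCandidates(pages);
  std::printf("collected %zu evacuation candidates\n", c.size());  // page 0 is skipped
  return 0;
}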
451
452 void MarkCompactCollector::AbortCompaction() {
453 if (compacting_) {
454 int npages = evacuation_candidates_.length();
455 for (int i = 0; i < npages; i++) {
456 Page* p = evacuation_candidates_[i];
457 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
458 p->ClearEvacuationCandidate();
459 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
460 }
461 compacting_ = false;
462 evacuation_candidates_.Rewind(0);
463 invalidated_code_.Rewind(0);
464 }
465 ASSERT_EQ(0, evacuation_candidates_.length());
466 }
467
468
114 void MarkCompactCollector::Prepare(GCTracer* tracer) { 469 void MarkCompactCollector::Prepare(GCTracer* tracer) {
470 was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
471
472 // Disable collection of maps if incremental marking is enabled.
473 // Map collection algorithm relies on a special map transition tree traversal
474 // order which is not implemented for incremental marking.
475 collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
476
115 // Rather than passing the tracer around we stash it in a static member 477 // Rather than passing the tracer around we stash it in a static member
116 // variable. 478 // variable.
117 tracer_ = tracer; 479 tracer_ = tracer;
118 480
119 #ifdef DEBUG 481 #ifdef DEBUG
120 ASSERT(state_ == IDLE); 482 ASSERT(state_ == IDLE);
121 state_ = PREPARE_GC; 483 state_ = PREPARE_GC;
122 #endif 484 #endif
123 ASSERT(!FLAG_always_compact || !FLAG_never_compact);
124 485
125 compacting_collection_ = 486 ASSERT(!FLAG_never_compact || !FLAG_always_compact);
126 FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
127 compact_on_next_gc_ = false;
128 487
129 if (FLAG_never_compact) compacting_collection_ = false; 488 if (collect_maps_) CreateBackPointers();
130 if (!heap()->map_space()->MapPointersEncodable())
131 compacting_collection_ = false;
132 if (FLAG_collect_maps) CreateBackPointers();
133 #ifdef ENABLE_GDB_JIT_INTERFACE 489 #ifdef ENABLE_GDB_JIT_INTERFACE
134 if (FLAG_gdbjit) { 490 if (FLAG_gdbjit) {
135 // If GDBJIT interface is active disable compaction. 491 // If GDBJIT interface is active disable compaction.
136 compacting_collection_ = false; 492 compacting_collection_ = false;
137 } 493 }
138 #endif 494 #endif
139 495
496 // Clear marking bits for precise sweeping to collect all garbage.
497 if (was_marked_incrementally_ && PreciseSweepingRequired()) {
498 heap()->incremental_marking()->Abort();
499 ClearMarkbits(heap_);
500 AbortCompaction();
501 was_marked_incrementally_ = false;
502 }
503
504 // Don't start compaction if we are in the middle of incremental
505 // marking cycle. We did not collect any slots.
506 if (!FLAG_never_compact && !was_marked_incrementally_) {
507 StartCompaction();
508 }
509
140 PagedSpaces spaces; 510 PagedSpaces spaces;
141 for (PagedSpace* space = spaces.next(); 511 for (PagedSpace* space = spaces.next();
142 space != NULL; space = spaces.next()) { 512 space != NULL;
143 space->PrepareForMarkCompact(compacting_collection_); 513 space = spaces.next()) {
514 space->PrepareForMarkCompact();
144 } 515 }
145 516
146 #ifdef DEBUG 517 #ifdef DEBUG
518 if (!was_marked_incrementally_) {
519 VerifyMarkbitsAreClean();
520 }
521 #endif
522
523 #ifdef DEBUG
147 live_bytes_ = 0; 524 live_bytes_ = 0;
148 live_young_objects_size_ = 0; 525 live_young_objects_size_ = 0;
149 live_old_pointer_objects_size_ = 0; 526 live_old_pointer_objects_size_ = 0;
150 live_old_data_objects_size_ = 0; 527 live_old_data_objects_size_ = 0;
151 live_code_objects_size_ = 0; 528 live_code_objects_size_ = 0;
152 live_map_objects_size_ = 0; 529 live_map_objects_size_ = 0;
153 live_cell_objects_size_ = 0; 530 live_cell_objects_size_ = 0;
154 live_lo_objects_size_ = 0; 531 live_lo_objects_size_ = 0;
155 #endif 532 #endif
156 } 533 }
157 534
158 535
159 void MarkCompactCollector::Finish() { 536 void MarkCompactCollector::Finish() {
160 #ifdef DEBUG 537 #ifdef DEBUG
161 ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS); 538 ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
162 state_ = IDLE; 539 state_ = IDLE;
163 #endif 540 #endif
164 // The stub cache is not traversed during GC; clear the cache to 541 // The stub cache is not traversed during GC; clear the cache to
165 // force lazy re-initialization of it. This must be done after the 542 // force lazy re-initialization of it. This must be done after the
166 // GC, because it relies on the new address of certain old space 543 // GC, because it relies on the new address of certain old space
167 // objects (empty string, illegal builtin). 544 // objects (empty string, illegal builtin).
168 heap()->isolate()->stub_cache()->Clear(); 545 heap()->isolate()->stub_cache()->Clear();
169 546
170 heap()->external_string_table_.CleanUp(); 547 heap()->external_string_table_.CleanUp();
171
172 // If we've just compacted old space there's no reason to check the
173 // fragmentation limit. Just return.
174 if (HasCompacted()) return;
175
176 // We compact the old generation on the next GC if it has gotten too
177 // fragmented (ie, we could recover an expected amount of space by
178 // reclaiming the waste and free list blocks).
179 static const int kFragmentationLimit = 15; // Percent.
180 static const int kFragmentationAllowed = 1 * MB; // Absolute.
181 intptr_t old_gen_recoverable = 0;
182 intptr_t old_gen_used = 0;
183
184 OldSpaces spaces;
185 for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
186 old_gen_recoverable += space->Waste() + space->AvailableFree();
187 old_gen_used += space->Size();
188 }
189
190 int old_gen_fragmentation =
191 static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
192 if (old_gen_fragmentation > kFragmentationLimit &&
193 old_gen_recoverable > kFragmentationAllowed) {
194 compact_on_next_gc_ = true;
195 }
196 } 548 }
197 549
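The block removed from Finish() scheduled a compacting GC for the next cycle when recoverable old-generation space exceeded both 15% of the space in use and 1 MB in absolute terms. A self-contained restatement of that check (constants copied from the deleted lines), with a worked example in main:

#include <cstdint>
#include <cstdio>

static const int kFragmentationLimit = 15;                     // percent
static const int64_t kFragmentationAllowed = 1 * 1024 * 1024;  // 1 MB

static bool CompactOnNextGC(int64_t old_gen_recoverable, int64_t old_gen_used) {
  int old_gen_fragmentation =
      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
  return old_gen_fragmentation > kFragmentationLimit &&
         old_gen_recoverable > kFragmentationAllowed;
}

int main() {
  // 20 MB recoverable out of 100 MB used: 20% > 15% and 20 MB > 1 MB, so compact.
  std::printf("%d\n", CompactOnNextGC(20 << 20, 100 << 20));
  // 512 KB recoverable out of 2 MB used: 25% > 15% but below the 1 MB floor.
  std::printf("%d\n", CompactOnNextGC(512 * 1024, 2 << 20));
  return 0;
}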
198 550
199 // ------------------------------------------------------------------------- 551 // -------------------------------------------------------------------------
200 // Phase 1: tracing and marking live objects. 552 // Phase 1: tracing and marking live objects.
201 // before: all objects are in normal state. 553 // before: all objects are in normal state.
202 // after: a live object's map pointer is marked as '00'. 554 // after: a live object's map pointer is marked as '00'.
203 555
204 // Marking all live objects in the heap as part of mark-sweep or mark-compact 556 // Marking all live objects in the heap as part of mark-sweep or mark-compact
205 // collection. Before marking, all objects are in their normal state. After 557 // collection. Before marking, all objects are in their normal state. After
(...skipping 48 matching lines...)
254 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile); 606 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
255 607
256 JSFunction* candidate = jsfunction_candidates_head_; 608 JSFunction* candidate = jsfunction_candidates_head_;
257 JSFunction* next_candidate; 609 JSFunction* next_candidate;
258 while (candidate != NULL) { 610 while (candidate != NULL) {
259 next_candidate = GetNextCandidate(candidate); 611 next_candidate = GetNextCandidate(candidate);
260 612
261 SharedFunctionInfo* shared = candidate->unchecked_shared(); 613 SharedFunctionInfo* shared = candidate->unchecked_shared();
262 614
263 Code* code = shared->unchecked_code(); 615 Code* code = shared->unchecked_code();
264 if (!code->IsMarked()) { 616 MarkBit code_mark = Marking::MarkBitFrom(code);
617 if (!code_mark.Get()) {
265 shared->set_code(lazy_compile); 618 shared->set_code(lazy_compile);
266 candidate->set_code(lazy_compile); 619 candidate->set_code(lazy_compile);
267 } else { 620 } else {
268 candidate->set_code(shared->unchecked_code()); 621 candidate->set_code(shared->unchecked_code());
269 } 622 }
270 623
624 // We are in the middle of a GC cycle so the write barrier in the code
625 // setter did not record the slot update and we have to do that manually.
626 Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
627 Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
628 isolate_->heap()->mark_compact_collector()->
629 RecordCodeEntrySlot(slot, target);
630
271 candidate = next_candidate; 631 candidate = next_candidate;
272 } 632 }
273 633
274 jsfunction_candidates_head_ = NULL; 634 jsfunction_candidates_head_ = NULL;
275 } 635 }
276 636
277 637
278 void ProcessSharedFunctionInfoCandidates() { 638 void ProcessSharedFunctionInfoCandidates() {
279 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile); 639 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
280 640
281 SharedFunctionInfo* candidate = shared_function_info_candidates_head_; 641 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
282 SharedFunctionInfo* next_candidate; 642 SharedFunctionInfo* next_candidate;
283 while (candidate != NULL) { 643 while (candidate != NULL) {
284 next_candidate = GetNextCandidate(candidate); 644 next_candidate = GetNextCandidate(candidate);
285 SetNextCandidate(candidate, NULL); 645 SetNextCandidate(candidate, NULL);
286 646
287 Code* code = candidate->unchecked_code(); 647 Code* code = candidate->unchecked_code();
288 if (!code->IsMarked()) { 648 MarkBit code_mark = Marking::MarkBitFrom(code);
649 if (!code_mark.Get()) {
289 candidate->set_code(lazy_compile); 650 candidate->set_code(lazy_compile);
290 } 651 }
291 652
292 candidate = next_candidate; 653 candidate = next_candidate;
293 } 654 }
294 655
295 shared_function_info_candidates_head_ = NULL; 656 shared_function_info_candidates_head_ = NULL;
296 } 657 }
297 658
298 static JSFunction** GetNextCandidateField(JSFunction* candidate) { 659 static JSFunction** GetNextCandidateField(JSFunction* candidate) {
(...skipping 49 matching lines...)
348 // 709 //
349 // Here we assume that if we change *p, we replace it with a heap object 710 // Here we assume that if we change *p, we replace it with a heap object
350 // (ie, the left substring of a cons string is always a heap object). 711 // (ie, the left substring of a cons string is always a heap object).
351 // 712 //
352 // The check performed is: 713 // The check performed is:
353 // object->IsConsString() && !object->IsSymbol() && 714 // object->IsConsString() && !object->IsSymbol() &&
354 // (ConsString::cast(object)->second() == HEAP->empty_string()) 715 // (ConsString::cast(object)->second() == HEAP->empty_string())
355 // except the maps for the object and its possible substrings might be 716 // except the maps for the object and its possible substrings might be
356 // marked. 717 // marked.
357 HeapObject* object = HeapObject::cast(*p); 718 HeapObject* object = HeapObject::cast(*p);
358 MapWord map_word = object->map_word(); 719 Map* map = object->map();
359 map_word.ClearMark(); 720 InstanceType type = map->instance_type();
360 InstanceType type = map_word.ToMap()->instance_type();
361 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object; 721 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
362 722
363 Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second(); 723 Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
364 Heap* heap = map_word.ToMap()->heap(); 724 Heap* heap = map->GetHeap();
365 if (second != heap->raw_unchecked_empty_string()) { 725 if (second != heap->empty_string()) {
366 return object; 726 return object;
367 } 727 }
368 728
369 // Since we don't have the object's start, it is impossible to update the 729 // Since we don't have the object's start, it is impossible to update the
370 // page dirty marks. Therefore, we only replace the string with its left 730 // page dirty marks. Therefore, we only replace the string with its left
371 // substring when page dirty marks do not change. 731 // substring when page dirty marks do not change.
372 Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first(); 732 Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
373 if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object; 733 if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
374 734
375 *p = first; 735 *p = first;
(...skipping 21 matching lines...)
397 table_.Register(kVisitSlicedString, 757 table_.Register(kVisitSlicedString,
398 &FixedBodyVisitor<StaticMarkingVisitor, 758 &FixedBodyVisitor<StaticMarkingVisitor,
399 SlicedString::BodyDescriptor, 759 SlicedString::BodyDescriptor,
400 void>::Visit); 760 void>::Visit);
401 761
402 table_.Register(kVisitFixedArray, 762 table_.Register(kVisitFixedArray,
403 &FlexibleBodyVisitor<StaticMarkingVisitor, 763 &FlexibleBodyVisitor<StaticMarkingVisitor,
404 FixedArray::BodyDescriptor, 764 FixedArray::BodyDescriptor,
405 void>::Visit); 765 void>::Visit);
406 766
767 table_.Register(kVisitGlobalContext, &VisitGlobalContext);
768
407 table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit); 769 table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);
408 770
409 table_.Register(kVisitGlobalContext,
410 &FixedBodyVisitor<StaticMarkingVisitor,
411 Context::MarkCompactBodyDescriptor,
412 void>::Visit);
413
414 table_.Register(kVisitByteArray, &DataObjectVisitor::Visit); 771 table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
772 table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
415 table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit); 773 table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
416 table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit); 774 table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
417 775
418 table_.Register(kVisitJSWeakMap, &VisitJSWeakMap); 776 table_.Register(kVisitJSWeakMap, &VisitJSWeakMap);
419 777
420 table_.Register(kVisitOddball, 778 table_.Register(kVisitOddball,
421 &FixedBodyVisitor<StaticMarkingVisitor, 779 &FixedBodyVisitor<StaticMarkingVisitor,
422 Oddball::BodyDescriptor, 780 Oddball::BodyDescriptor,
423 void>::Visit); 781 void>::Visit);
424 table_.Register(kVisitMap, 782 table_.Register(kVisitMap,
(...skipping 24 matching lines...)
449 table_.RegisterSpecializations<JSObjectVisitor, 807 table_.RegisterSpecializations<JSObjectVisitor,
450 kVisitJSObject, 808 kVisitJSObject,
451 kVisitJSObjectGeneric>(); 809 kVisitJSObjectGeneric>();
452 810
453 table_.RegisterSpecializations<StructObjectVisitor, 811 table_.RegisterSpecializations<StructObjectVisitor,
454 kVisitStruct, 812 kVisitStruct,
455 kVisitStructGeneric>(); 813 kVisitStructGeneric>();
456 } 814 }
457 815
458 INLINE(static void VisitPointer(Heap* heap, Object** p)) { 816 INLINE(static void VisitPointer(Heap* heap, Object** p)) {
459 MarkObjectByPointer(heap, p); 817 MarkObjectByPointer(heap->mark_compact_collector(), p, p);
460 } 818 }
461 819
462 INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) { 820 INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
463 // Mark all objects pointed to in [start, end). 821 // Mark all objects pointed to in [start, end).
464 const int kMinRangeForMarkingRecursion = 64; 822 const int kMinRangeForMarkingRecursion = 64;
465 if (end - start >= kMinRangeForMarkingRecursion) { 823 if (end - start >= kMinRangeForMarkingRecursion) {
466 if (VisitUnmarkedObjects(heap, start, end)) return; 824 if (VisitUnmarkedObjects(heap, start, end)) return;
467 // We are close to a stack overflow, so just mark the objects. 825 // We are close to a stack overflow, so just mark the objects.
468 } 826 }
469 for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p); 827 MarkCompactCollector* collector = heap->mark_compact_collector();
470 } 828 for (Object** p = start; p < end; p++) {
471 829 MarkObjectByPointer(collector, start, p);
472 static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
473 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
474 Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
475 if (FLAG_cleanup_code_caches_at_gc && code->is_inline_cache_stub()) {
476 IC::Clear(rinfo->pc());
477 // Please note targets for cleared inline caches do not have to be
478 // marked since they are contained in HEAP->non_monomorphic_cache().
479 } else {
480 heap->mark_compact_collector()->MarkObject(code);
481 } 830 }
482 } 831 }
483 832
484 static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) { 833 static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
485 ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL); 834 ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
486 Object* cell = rinfo->target_cell(); 835 JSGlobalPropertyCell* cell =
487 Object* old_cell = cell; 836 JSGlobalPropertyCell::cast(rinfo->target_cell());
488 VisitPointer(heap, &cell); 837 MarkBit mark = Marking::MarkBitFrom(cell);
489 if (cell != old_cell) { 838 heap->mark_compact_collector()->MarkObject(cell, mark);
490 rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell)); 839 }
840
841 static inline void VisitEmbeddedPointer(Heap* heap, Code* host, Object** p) {
842 MarkObjectByPointer(heap->mark_compact_collector(),
843 reinterpret_cast<Object**>(host),
844 p);
845 }
846
847 static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
848 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
849 Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
850 if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()) {
851 IC::Clear(rinfo->pc());
852 // Please note targets for cleared inline caches do not have to be
853 // marked since they are contained in HEAP->non_monomorphic_cache().
854 target = Code::GetCodeFromTargetAddress(rinfo->target_address());
855 } else {
856 if (FLAG_cleanup_code_caches_at_gc &&
857 target->kind() == Code::STUB &&
858 target->major_key() == CodeStub::CallFunction &&
859 target->has_function_cache()) {
860 CallFunctionStub::Clear(heap, rinfo->pc());
861 }
862 MarkBit code_mark = Marking::MarkBitFrom(target);
863 heap->mark_compact_collector()->MarkObject(target, code_mark);
491 } 864 }
865 heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
492 } 866 }
493 867
494 static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) { 868 static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
495 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && 869 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
496 rinfo->IsPatchedReturnSequence()) || 870 rinfo->IsPatchedReturnSequence()) ||
497 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && 871 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
498 rinfo->IsPatchedDebugBreakSlotSequence())); 872 rinfo->IsPatchedDebugBreakSlotSequence()));
499 HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address()); 873 Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
500 heap->mark_compact_collector()->MarkObject(code); 874 MarkBit code_mark = Marking::MarkBitFrom(target);
875 heap->mark_compact_collector()->MarkObject(target, code_mark);
876 heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
501 } 877 }
502 878
503 // Mark object pointed to by p. 879 // Mark object pointed to by p.
504 INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) { 880 INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
881 Object** anchor_slot,
882 Object** p)) {
505 if (!(*p)->IsHeapObject()) return; 883 if (!(*p)->IsHeapObject()) return;
506 HeapObject* object = ShortCircuitConsString(p); 884 HeapObject* object = ShortCircuitConsString(p);
507 if (!object->IsMarked()) { 885 collector->RecordSlot(anchor_slot, p, object);
508 heap->mark_compact_collector()->MarkUnmarkedObject(object); 886 MarkBit mark = Marking::MarkBitFrom(object);
509 } 887 collector->MarkObject(object, mark);
510 } 888 }
511 889
512 890
513 // Visit an unmarked object. 891 // Visit an unmarked object.
514 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector, 892 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
515 HeapObject* obj)) { 893 HeapObject* obj)) {
516 #ifdef DEBUG 894 #ifdef DEBUG
517 ASSERT(Isolate::Current()->heap()->Contains(obj)); 895 ASSERT(Isolate::Current()->heap()->Contains(obj));
518 ASSERT(!obj->IsMarked()); 896 ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));
519 #endif 897 #endif
520 Map* map = obj->map(); 898 Map* map = obj->map();
521 collector->SetMark(obj); 899 Heap* heap = obj->GetHeap();
900 MarkBit mark = Marking::MarkBitFrom(obj);
901 heap->mark_compact_collector()->SetMark(obj, mark);
522 // Mark the map pointer and the body. 902 // Mark the map pointer and the body.
523 if (!map->IsMarked()) collector->MarkUnmarkedObject(map); 903 MarkBit map_mark = Marking::MarkBitFrom(map);
904 heap->mark_compact_collector()->MarkObject(map, map_mark);
524 IterateBody(map, obj); 905 IterateBody(map, obj);
525 } 906 }
526 907
527 // Visit all unmarked objects pointed to by [start, end). 908 // Visit all unmarked objects pointed to by [start, end).
528 // Returns false if the operation fails (lack of stack space). 909 // Returns false if the operation fails (lack of stack space).
529 static inline bool VisitUnmarkedObjects(Heap* heap, 910 static inline bool VisitUnmarkedObjects(Heap* heap,
530 Object** start, 911 Object** start,
531 Object** end) { 912 Object** end) {
532 // Return false if we are close to the stack limit. 913 // Return false if we are close to the stack limit.
533 StackLimitCheck check(heap->isolate()); 914 StackLimitCheck check(heap->isolate());
534 if (check.HasOverflowed()) return false; 915 if (check.HasOverflowed()) return false;
535 916
536 MarkCompactCollector* collector = heap->mark_compact_collector(); 917 MarkCompactCollector* collector = heap->mark_compact_collector();
537 // Visit the unmarked objects. 918 // Visit the unmarked objects.
538 for (Object** p = start; p < end; p++) { 919 for (Object** p = start; p < end; p++) {
539 if (!(*p)->IsHeapObject()) continue; 920 Object* o = *p;
540 HeapObject* obj = HeapObject::cast(*p); 921 if (!o->IsHeapObject()) continue;
541 if (obj->IsMarked()) continue; 922 collector->RecordSlot(start, p, o);
923 HeapObject* obj = HeapObject::cast(o);
924 MarkBit mark = Marking::MarkBitFrom(obj);
925 if (mark.Get()) continue;
542 VisitUnmarkedObject(collector, obj); 926 VisitUnmarkedObject(collector, obj);
543 } 927 }
544 return true; 928 return true;
545 } 929 }
546 930
547 static inline void VisitExternalReference(Address* p) { } 931 static inline void VisitExternalReference(Address* p) { }
548 static inline void VisitRuntimeEntry(RelocInfo* rinfo) { } 932 static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
549 933
550 private: 934 private:
551 class DataObjectVisitor { 935 class DataObjectVisitor {
552 public: 936 public:
553 template<int size> 937 template<int size>
554 static void VisitSpecialized(Map* map, HeapObject* object) { 938 static void VisitSpecialized(Map* map, HeapObject* object) {
555 } 939 }
556 940
557 static void Visit(Map* map, HeapObject* object) { 941 static void Visit(Map* map, HeapObject* object) {
558 } 942 }
559 }; 943 };
560 944
561 typedef FlexibleBodyVisitor<StaticMarkingVisitor, 945 typedef FlexibleBodyVisitor<StaticMarkingVisitor,
562 JSObject::BodyDescriptor, 946 JSObject::BodyDescriptor,
563 void> JSObjectVisitor; 947 void> JSObjectVisitor;
564 948
565 typedef FlexibleBodyVisitor<StaticMarkingVisitor, 949 typedef FlexibleBodyVisitor<StaticMarkingVisitor,
566 StructBodyDescriptor, 950 StructBodyDescriptor,
567 void> StructObjectVisitor; 951 void> StructObjectVisitor;
568 952
569 static void VisitJSWeakMap(Map* map, HeapObject* object) { 953 static void VisitJSWeakMap(Map* map, HeapObject* object) {
570 MarkCompactCollector* collector = map->heap()->mark_compact_collector(); 954 MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
571 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object); 955 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
572 956
573 // Enqueue weak map in linked list of encountered weak maps. 957 // Enqueue weak map in linked list of encountered weak maps.
574 ASSERT(weak_map->next() == Smi::FromInt(0)); 958 ASSERT(weak_map->next() == Smi::FromInt(0));
575 weak_map->set_next(collector->encountered_weak_maps()); 959 weak_map->set_next(collector->encountered_weak_maps());
576 collector->set_encountered_weak_maps(weak_map); 960 collector->set_encountered_weak_maps(weak_map);
577 961
578 // Skip visiting the backing hash table containing the mappings. 962 // Skip visiting the backing hash table containing the mappings.
579 int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object); 963 int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
580 BodyVisitorBase<StaticMarkingVisitor>::IteratePointers( 964 BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
581 map->heap(), 965 map->GetHeap(),
582 object, 966 object,
583 JSWeakMap::BodyDescriptor::kStartOffset, 967 JSWeakMap::BodyDescriptor::kStartOffset,
584 JSWeakMap::kTableOffset); 968 JSWeakMap::kTableOffset);
585 BodyVisitorBase<StaticMarkingVisitor>::IteratePointers( 969 BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
586 map->heap(), 970 map->GetHeap(),
587 object, 971 object,
588 JSWeakMap::kTableOffset + kPointerSize, 972 JSWeakMap::kTableOffset + kPointerSize,
589 object_size); 973 object_size);
590 974
591 // Mark the backing hash table without pushing it on the marking stack. 975 // Mark the backing hash table without pushing it on the marking stack.
592 ASSERT(!weak_map->unchecked_table()->IsMarked()); 976 ASSERT(!MarkCompactCollector::IsMarked(weak_map->unchecked_table()));
593 ASSERT(weak_map->unchecked_table()->map()->IsMarked()); 977 ASSERT(MarkCompactCollector::IsMarked(weak_map->unchecked_table()->map()));
594 collector->SetMark(weak_map->unchecked_table()); 978
979 HeapObject* unchecked_table = weak_map->unchecked_table();
980 MarkBit mark_bit = Marking::MarkBitFrom(unchecked_table);
981 collector->SetMark(unchecked_table, mark_bit);
595 } 982 }
596 983
597 static void VisitCode(Map* map, HeapObject* object) { 984 static void VisitCode(Map* map, HeapObject* object) {
598 reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>( 985 reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(
599 map->heap()); 986 map->GetHeap());
600 } 987 }
601 988
602 // Code flushing support. 989 // Code flushing support.
603 990
604 // How many collections newly compiled code object will survive before being 991 // How many collections newly compiled code object will survive before being
605 // flushed. 992 // flushed.
606 static const int kCodeAgeThreshold = 5; 993 static const int kCodeAgeThreshold = 5;
607 994
608 static const int kRegExpCodeThreshold = 5; 995 static const int kRegExpCodeThreshold = 5;
609 996
610 inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) { 997 inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
611 Object* undefined = heap->raw_unchecked_undefined_value(); 998 Object* undefined = heap->undefined_value();
612 return (info->script() != undefined) && 999 return (info->script() != undefined) &&
613 (reinterpret_cast<Script*>(info->script())->source() != undefined); 1000 (reinterpret_cast<Script*>(info->script())->source() != undefined);
614 } 1001 }
615 1002
616 1003
617 inline static bool IsCompiled(JSFunction* function) { 1004 inline static bool IsCompiled(JSFunction* function) {
618 return function->unchecked_code() != 1005 return function->unchecked_code() !=
619 function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile); 1006 function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
620 } 1007 }
621 1008
622 inline static bool IsCompiled(SharedFunctionInfo* function) { 1009 inline static bool IsCompiled(SharedFunctionInfo* function) {
623 return function->unchecked_code() != 1010 return function->unchecked_code() !=
624 function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile); 1011 function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
625 } 1012 }
626 1013
627 inline static bool IsFlushable(Heap* heap, JSFunction* function) { 1014 inline static bool IsFlushable(Heap* heap, JSFunction* function) {
628 SharedFunctionInfo* shared_info = function->unchecked_shared(); 1015 SharedFunctionInfo* shared_info = function->unchecked_shared();
629 1016
630 // Code is either on stack, in compilation cache or referenced 1017 // Code is either on stack, in compilation cache or referenced
631 // by optimized version of function. 1018 // by optimized version of function.
632 if (function->unchecked_code()->IsMarked()) { 1019 MarkBit code_mark =
1020 Marking::MarkBitFrom(function->unchecked_code());
1021 if (code_mark.Get()) {
633 shared_info->set_code_age(0); 1022 shared_info->set_code_age(0);
634 return false; 1023 return false;
635 } 1024 }
636 1025
637 // We do not flush code for optimized functions. 1026 // We do not flush code for optimized functions.
638 if (function->code() != shared_info->unchecked_code()) { 1027 if (function->code() != shared_info->unchecked_code()) {
639 return false; 1028 return false;
640 } 1029 }
641 1030
642 return IsFlushable(heap, shared_info); 1031 return IsFlushable(heap, shared_info);
643 } 1032 }
644 1033
645 inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) { 1034 inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
646 // Code is either on stack, in compilation cache or referenced 1035 // Code is either on stack, in compilation cache or referenced
647 // by optimized version of function. 1036 // by optimized version of function.
648 if (shared_info->unchecked_code()->IsMarked()) { 1037 MarkBit code_mark =
1038 Marking::MarkBitFrom(shared_info->unchecked_code());
1039 if (code_mark.Get()) {
649 shared_info->set_code_age(0); 1040 shared_info->set_code_age(0);
650 return false; 1041 return false;
651 } 1042 }
652 1043
653 // The function must be compiled and have the source code available, 1044 // The function must be compiled and have the source code available,
654 // to be able to recompile it in case we need the function again. 1045 // to be able to recompile it in case we need the function again.
655 if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) { 1046 if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
656 return false; 1047 return false;
657 } 1048 }
658 1049
659 // We never flush code for Api functions. 1050 // We never flush code for Api functions.
660 Object* function_data = shared_info->function_data(); 1051 Object* function_data = shared_info->function_data();
661 if (function_data->IsHeapObject() && 1052 if (function_data->IsFunctionTemplateInfo()) return false;
662 (SafeMap(function_data)->instance_type() ==
663 FUNCTION_TEMPLATE_INFO_TYPE)) {
664 return false;
665 }
666 1053
667 // Only flush code for functions. 1054 // Only flush code for functions.
668 if (shared_info->code()->kind() != Code::FUNCTION) return false; 1055 if (shared_info->code()->kind() != Code::FUNCTION) return false;
669 1056
670 // Function must be lazy compilable. 1057 // Function must be lazy compilable.
671 if (!shared_info->allows_lazy_compilation()) return false; 1058 if (!shared_info->allows_lazy_compilation()) return false;
672 1059
673 // If this is a full script wrapped in a function we do not flush the code. 1060 // If this is a full script wrapped in a function we do not flush the code.
674 if (shared_info->is_toplevel()) return false; 1061 if (shared_info->is_toplevel()) return false;
675 1062
(...skipping 12 matching lines...)
688 1075
689 // This function's code looks flushable. But we have to postpone the 1076 // This function's code looks flushable. But we have to postpone the
690 // decision until we see all functions that point to the same 1077 // decision until we see all functions that point to the same
691 // SharedFunctionInfo because some of them might be optimized. 1078 // SharedFunctionInfo because some of them might be optimized.
692 // That would make the nonoptimized version of the code nonflushable, 1079 // That would make the nonoptimized version of the code nonflushable,
693 // because it is required for bailing out from optimized code. 1080 // because it is required for bailing out from optimized code.
694 heap->mark_compact_collector()->code_flusher()->AddCandidate(function); 1081 heap->mark_compact_collector()->code_flusher()->AddCandidate(function);
695 return true; 1082 return true;
696 } 1083 }
697 1084
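The comment above is the key constraint on code flushing: marking only nominates candidates, and the real decision is taken later, once every function sharing the same code has been seen and the code's mark bit is known (see ProcessJSFunctionCandidates near the top of this file). A minimal sketch of that two-phase pattern, using hypothetical stand-in types rather than V8's:

#include <cstdio>
#include <vector>

// Hypothetical stand-in: a candidate just carries a flag that marking may set
// if something else (e.g. an optimized version) keeps the code alive.
struct FunctionCandidate {
  const char* name;
  bool code_is_marked;
};

class CodeFlusherSketch {
 public:
  // Phase 1 (during marking): only record the candidate, decide nothing yet.
  void AddCandidate(FunctionCandidate* f) { candidates_.push_back(f); }

  // Phase 2 (after marking): flush only code whose mark bit stayed clear.
  void ProcessCandidates() {
    for (FunctionCandidate* f : candidates_) {
      if (!f->code_is_marked) {
        std::printf("flushing %s (reset to lazy compile)\n", f->name);
      } else {
        std::printf("keeping %s\n", f->name);
      }
    }
    candidates_.clear();
  }

 private:
  std::vector<FunctionCandidate*> candidates_;
};

int main() {
  FunctionCandidate a = {"f", false};
  FunctionCandidate b = {"g", true};  // e.g. still reachable via an optimized version
  CodeFlusherSketch flusher;
  flusher.AddCandidate(&a);
  flusher.AddCandidate(&b);
  flusher.ProcessCandidates();
  return 0;
}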
698 1085 static inline bool IsValidNotBuiltinContext(Object* ctx) {
699 static inline Map* SafeMap(Object* obj) { 1086 return ctx->IsContext() &&
700 MapWord map_word = HeapObject::cast(obj)->map_word(); 1087 !Context::cast(ctx)->global()->IsJSBuiltinsObject();
701 map_word.ClearMark();
702 map_word.ClearOverflow();
703 return map_word.ToMap();
704 } 1088 }
705 1089
706 1090
707 static inline bool IsJSBuiltinsObject(Object* obj) {
708 return obj->IsHeapObject() &&
709 (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
710 }
711
712
713 static inline bool IsValidNotBuiltinContext(Object* ctx) {
714 if (!ctx->IsHeapObject()) return false;
715
716 Map* map = SafeMap(ctx);
717 Heap* heap = map->heap();
718 if (!(map == heap->raw_unchecked_function_context_map() ||
719 map == heap->raw_unchecked_catch_context_map() ||
720 map == heap->raw_unchecked_with_context_map() ||
721 map == heap->raw_unchecked_global_context_map())) {
722 return false;
723 }
724
725 Context* context = reinterpret_cast<Context*>(ctx);
726
727 if (IsJSBuiltinsObject(context->global())) {
728 return false;
729 }
730
731 return true;
732 }
733
734
735 static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) { 1091 static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
736 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object); 1092 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
737 1093
738 if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap(); 1094 if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
739 1095
740 FixedBodyVisitor<StaticMarkingVisitor, 1096 FixedBodyVisitor<StaticMarkingVisitor,
741 SharedFunctionInfo::BodyDescriptor, 1097 SharedFunctionInfo::BodyDescriptor,
742 void>::Visit(map, object); 1098 void>::Visit(map, object);
743 } 1099 }
744 1100
745 1101
746 static void UpdateRegExpCodeAgeAndFlush(Heap* heap, 1102 static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
747 JSRegExp* re, 1103 JSRegExp* re,
748 bool is_ascii) { 1104 bool is_ascii) {
749 // Make sure that the fixed array is in fact initialized on the RegExp. 1105 // Make sure that the fixed array is in fact initialized on the RegExp.
750 // We could potentially trigger a GC when initializing the RegExp. 1106 // We could potentially trigger a GC when initializing the RegExp.
751 if (SafeMap(re->data())->instance_type() != FIXED_ARRAY_TYPE) return; 1107 if (HeapObject::cast(re->data())->map()->instance_type() !=
1108 FIXED_ARRAY_TYPE) return;
752 1109
753 // Make sure this is a RegExp that actually contains code. 1110 // Make sure this is a RegExp that actually contains code.
754 if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return; 1111 if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;
755 1112
756 Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii)); 1113 Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
757 if (!code->IsSmi() && SafeMap(code)->instance_type() == CODE_TYPE) { 1114 if (!code->IsSmi() &&
1115 HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
758 // Save a copy that can be reinstated if we need the code again. 1116 // Save a copy that can be reinstated if we need the code again.
759 re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii), 1117 re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
760 code, 1118 code,
761 heap); 1119 heap);
762 // Set a number in the 0-255 range to guarantee no smi overflow. 1120 // Set a number in the 0-255 range to guarantee no smi overflow.
763 re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii), 1121 re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
764 Smi::FromInt(heap->sweep_generation() & 0xff), 1122 Smi::FromInt(heap->sweep_generation() & 0xff),
765 heap); 1123 heap);
766 } else if (code->IsSmi()) { 1124 } else if (code->IsSmi()) {
767 int value = Smi::cast(code)->value(); 1125 int value = Smi::cast(code)->value();
(...skipping 15 matching lines...)
783 } 1141 }
784 } 1142 }
785 1143
786 1144
787 // Works by setting the current sweep_generation (as a smi) in the 1145 // Works by setting the current sweep_generation (as a smi) in the
788 // code object place in the data array of the RegExp and keeps a copy 1146 // code object place in the data array of the RegExp and keeps a copy
789 // around that can be reinstated if we reuse the RegExp before flushing. 1147 // around that can be reinstated if we reuse the RegExp before flushing.
790 // If we did not use the code for kRegExpCodeThreshold mark sweep GCs 1148 // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
791 // we flush the code. 1149 // we flush the code.
792 static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) { 1150 static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
793 Heap* heap = map->heap(); 1151 Heap* heap = map->GetHeap();
794 MarkCompactCollector* collector = heap->mark_compact_collector(); 1152 MarkCompactCollector* collector = heap->mark_compact_collector();
795 if (!collector->is_code_flushing_enabled()) { 1153 if (!collector->is_code_flushing_enabled()) {
796 VisitJSRegExpFields(map, object); 1154 VisitJSRegExpFields(map, object);
797 return; 1155 return;
798 } 1156 }
799 JSRegExp* re = reinterpret_cast<JSRegExp*>(object); 1157 JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
800 // Flush code or set age on both ascii and two byte code. 1158 // Flush code or set age on both ascii and two byte code.
801 UpdateRegExpCodeAgeAndFlush(heap, re, true); 1159 UpdateRegExpCodeAgeAndFlush(heap, re, true);
802 UpdateRegExpCodeAgeAndFlush(heap, re, false); 1160 UpdateRegExpCodeAgeAndFlush(heap, re, false);
803 // Visit the fields of the RegExp, including the updated FixedArray. 1161 // Visit the fields of the RegExp, including the updated FixedArray.
804 VisitJSRegExpFields(map, object); 1162 VisitJSRegExpFields(map, object);
805 } 1163 }
806 1164
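For reference, the aging scheme described in the comments above can be stated outside of V8: the compiled-code slot is overwritten with a small stamp of the current sweep generation, a saved copy is kept so the code can be reinstated cheaply if the RegExp is used again, and an entry whose code has gone unused for kRegExpCodeThreshold collections loses the saved copy. The following is a minimal standalone sketch of that idea; the struct, field names, helper names and the threshold value are illustrative assumptions, not the V8 API.

#include <cstdio>

const int kRegExpCodeThreshold = 5;  // assumed value, for illustration only

struct RegExpEntry {
  bool has_code;    // the code slot currently holds compiled code
  int code_or_age;  // compiled code "handle" while has_code, else the age stamp
  int saved_code;   // copy that can be reinstated cheaply (0 == flushed)
};

// Run once per mark-sweep GC for each regexp (cf. UpdateRegExpCodeAgeAndFlush).
void AgeOrFlush(RegExpEntry* re, int sweep_generation) {
  if (re->has_code) {
    // Save the code and stamp the slot with the current generation,
    // kept in the 0-255 range so the stamp stays a small integer.
    re->saved_code = re->code_or_age;
    re->code_or_age = sweep_generation & 0xff;
    re->has_code = false;
  } else if (re->saved_code != 0) {
    // Modulo-256 distance since the code was last stamped.
    int age = ((sweep_generation & 0xff) - re->code_or_age) & 0xff;
    if (age >= kRegExpCodeThreshold) re->saved_code = 0;  // flush for good
  }
}

// Run when the regexp is executed again before its code was flushed.
bool Reinstate(RegExpEntry* re) {
  if (!re->has_code && re->saved_code != 0) {
    re->code_or_age = re->saved_code;
    re->has_code = true;
  }
  return re->has_code;
}

int main() {
  RegExpEntry re = {true, /* code */ 42, /* saved */ 0};
  // Age the entry through enough unused collections to trigger a flush.
  for (int gc = 1; gc <= kRegExpCodeThreshold + 1; ++gc) AgeOrFlush(&re, gc);
  printf("code reinstated after going unused: %s\n",
         Reinstate(&re) ? "yes" : "no");  // prints "no"
}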
807 1165
808 static void VisitSharedFunctionInfoAndFlushCode(Map* map, 1166 static void VisitSharedFunctionInfoAndFlushCode(Map* map,
809 HeapObject* object) { 1167 HeapObject* object) {
810 MarkCompactCollector* collector = map->heap()->mark_compact_collector(); 1168 MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
811 if (!collector->is_code_flushing_enabled()) { 1169 if (!collector->is_code_flushing_enabled()) {
812 VisitSharedFunctionInfoGeneric(map, object); 1170 VisitSharedFunctionInfoGeneric(map, object);
813 return; 1171 return;
814 } 1172 }
815 VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false); 1173 VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
816 } 1174 }
817 1175
818 1176
819 static void VisitSharedFunctionInfoAndFlushCodeGeneric( 1177 static void VisitSharedFunctionInfoAndFlushCodeGeneric(
820 Map* map, HeapObject* object, bool known_flush_code_candidate) { 1178 Map* map, HeapObject* object, bool known_flush_code_candidate) {
821 Heap* heap = map->heap(); 1179 Heap* heap = map->GetHeap();
822 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object); 1180 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
823 1181
824 if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap(); 1182 if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
825 1183
826 if (!known_flush_code_candidate) { 1184 if (!known_flush_code_candidate) {
827 known_flush_code_candidate = IsFlushable(heap, shared); 1185 known_flush_code_candidate = IsFlushable(heap, shared);
828 if (known_flush_code_candidate) { 1186 if (known_flush_code_candidate) {
829 heap->mark_compact_collector()->code_flusher()->AddCandidate(shared); 1187 heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
830 } 1188 }
831 } 1189 }
832 1190
833 VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate); 1191 VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate);
834 } 1192 }
835 1193
836 1194
837 static void VisitCodeEntry(Heap* heap, Address entry_address) { 1195 static void VisitCodeEntry(Heap* heap, Address entry_address) {
838 Object* code = Code::GetObjectFromEntryAddress(entry_address); 1196 Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
839 Object* old_code = code; 1197 MarkBit mark = Marking::MarkBitFrom(code);
840 VisitPointer(heap, &code); 1198 heap->mark_compact_collector()->MarkObject(code, mark);
841 if (code != old_code) { 1199 heap->mark_compact_collector()->
842 Memory::Address_at(entry_address) = 1200 RecordCodeEntrySlot(entry_address, code);
843 reinterpret_cast<Code*>(code)->entry(); 1201 }
1202
1203 static void VisitGlobalContext(Map* map, HeapObject* object) {
1204 FixedBodyVisitor<StaticMarkingVisitor,
1205 Context::MarkCompactBodyDescriptor,
1206 void>::Visit(map, object);
1207
1208 MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
1209 for (int idx = Context::FIRST_WEAK_SLOT;
1210 idx < Context::GLOBAL_CONTEXT_SLOTS;
1211 ++idx) {
1212 Object** slot =
1213 HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
1214 collector->RecordSlot(slot, slot, *slot);
844 } 1215 }
845 } 1216 }
846 1217
847
848 static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) { 1218 static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
849 Heap* heap = map->heap(); 1219 Heap* heap = map->GetHeap();
850 MarkCompactCollector* collector = heap->mark_compact_collector(); 1220 MarkCompactCollector* collector = heap->mark_compact_collector();
851 if (!collector->is_code_flushing_enabled()) { 1221 if (!collector->is_code_flushing_enabled()) {
852 VisitJSFunction(map, object); 1222 VisitJSFunction(map, object);
853 return; 1223 return;
854 } 1224 }
855 1225
856 JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object); 1226 JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
857 // The function must have a valid context and not be a builtin. 1227 // The function must have a valid context and not be a builtin.
858 bool flush_code_candidate = false; 1228 bool flush_code_candidate = false;
859 if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) { 1229 if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
860 flush_code_candidate = FlushCodeForFunction(heap, jsfunction); 1230 flush_code_candidate = FlushCodeForFunction(heap, jsfunction);
861 } 1231 }
862 1232
863 if (!flush_code_candidate) { 1233 if (!flush_code_candidate) {
864 collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code()); 1234 Code* code = jsfunction->unchecked_shared()->unchecked_code();
1235 MarkBit code_mark = Marking::MarkBitFrom(code);
1236 heap->mark_compact_collector()->MarkObject(code, code_mark);
865 1237
866 if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) { 1238 if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
867 // For optimized functions we should retain both the non-optimized version 1239 // For optimized functions we should retain both the non-optimized version
868 // of its code and the non-optimized versions of all inlined functions. 1240 // of its code and the non-optimized versions of all inlined functions.
869 // This is required to support bailing out from inlined code. 1241 // This is required to support bailing out from inlined code.
870 DeoptimizationInputData* data = 1242 DeoptimizationInputData* data =
871 reinterpret_cast<DeoptimizationInputData*>( 1243 reinterpret_cast<DeoptimizationInputData*>(
872 jsfunction->unchecked_code()->unchecked_deoptimization_data()); 1244 jsfunction->unchecked_code()->unchecked_deoptimization_data());
873 1245
874 FixedArray* literals = data->UncheckedLiteralArray(); 1246 FixedArray* literals = data->UncheckedLiteralArray();
875 1247
876 for (int i = 0, count = data->InlinedFunctionCount()->value(); 1248 for (int i = 0, count = data->InlinedFunctionCount()->value();
877 i < count; 1249 i < count;
878 i++) { 1250 i++) {
879 JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i)); 1251 JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
880 collector->MarkObject(inlined->unchecked_shared()->unchecked_code()); 1252 Code* inlined_code = inlined->unchecked_shared()->unchecked_code();
1253 MarkBit inlined_code_mark =
1254 Marking::MarkBitFrom(inlined_code);
1255 heap->mark_compact_collector()->MarkObject(
1256 inlined_code, inlined_code_mark);
881 } 1257 }
882 } 1258 }
883 } 1259 }
884 1260
885 VisitJSFunctionFields(map, 1261 VisitJSFunctionFields(map,
886 reinterpret_cast<JSFunction*>(object), 1262 reinterpret_cast<JSFunction*>(object),
887 flush_code_candidate); 1263 flush_code_candidate);
888 } 1264 }
889 1265
890 1266
891 static void VisitJSFunction(Map* map, HeapObject* object) { 1267 static void VisitJSFunction(Map* map, HeapObject* object) {
892 VisitJSFunctionFields(map, 1268 VisitJSFunctionFields(map,
893 reinterpret_cast<JSFunction*>(object), 1269 reinterpret_cast<JSFunction*>(object),
894 false); 1270 false);
895 } 1271 }
896 1272
897 1273
898 #define SLOT_ADDR(obj, offset) \ 1274 #define SLOT_ADDR(obj, offset) \
899 reinterpret_cast<Object**>((obj)->address() + offset) 1275 reinterpret_cast<Object**>((obj)->address() + offset)
900 1276
901 1277
902 static inline void VisitJSFunctionFields(Map* map, 1278 static inline void VisitJSFunctionFields(Map* map,
903 JSFunction* object, 1279 JSFunction* object,
904 bool flush_code_candidate) { 1280 bool flush_code_candidate) {
905 Heap* heap = map->heap(); 1281 Heap* heap = map->GetHeap();
906 MarkCompactCollector* collector = heap->mark_compact_collector();
907 1282
908 VisitPointers(heap, 1283 VisitPointers(heap,
909 SLOT_ADDR(object, JSFunction::kPropertiesOffset), 1284 HeapObject::RawField(object, JSFunction::kPropertiesOffset),
910 SLOT_ADDR(object, JSFunction::kCodeEntryOffset)); 1285 HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
911 1286
912 if (!flush_code_candidate) { 1287 if (!flush_code_candidate) {
913 VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset); 1288 VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
914 } else { 1289 } else {
915 // Don't visit code object. 1290 // Don't visit code object.
916 1291
917 // Visit shared function info to avoid double checking of its 1292 // Visit shared function info to avoid double checking of its
918 // flushability. 1293 // flushability.
919 SharedFunctionInfo* shared_info = object->unchecked_shared(); 1294 SharedFunctionInfo* shared_info = object->unchecked_shared();
920 if (!shared_info->IsMarked()) { 1295 MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info);
1296 if (!shared_info_mark.Get()) {
921 Map* shared_info_map = shared_info->map(); 1297 Map* shared_info_map = shared_info->map();
922 collector->SetMark(shared_info); 1298 MarkBit shared_info_map_mark =
923 collector->MarkObject(shared_info_map); 1299 Marking::MarkBitFrom(shared_info_map);
1300 heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark);
1301 heap->mark_compact_collector()->MarkObject(shared_info_map,
1302 shared_info_map_mark);
924 VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map, 1303 VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
925 shared_info, 1304 shared_info,
926 true); 1305 true);
927 } 1306 }
928 } 1307 }
929 1308
930 VisitPointers(heap, 1309 VisitPointers(
931 SLOT_ADDR(object, 1310 heap,
932 JSFunction::kCodeEntryOffset + kPointerSize), 1311 HeapObject::RawField(object,
933 SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset)); 1312 JSFunction::kCodeEntryOffset + kPointerSize),
1313 HeapObject::RawField(object,
1314 JSFunction::kNonWeakFieldsEndOffset));
934 1315
935 // Don't visit the next function list field as it is a weak reference. 1316 // Don't visit the next function list field as it is a weak reference.
1317 Object** next_function =
1318 HeapObject::RawField(object, JSFunction::kNextFunctionLinkOffset);
1319 heap->mark_compact_collector()->RecordSlot(
1320 next_function, next_function, *next_function);
936 } 1321 }
937 1322
938 static inline void VisitJSRegExpFields(Map* map, 1323 static inline void VisitJSRegExpFields(Map* map,
939 HeapObject* object) { 1324 HeapObject* object) {
940 int last_property_offset = 1325 int last_property_offset =
941 JSRegExp::kSize + kPointerSize * map->inobject_properties(); 1326 JSRegExp::kSize + kPointerSize * map->inobject_properties();
942 VisitPointers(map->heap(), 1327 VisitPointers(map->GetHeap(),
943 SLOT_ADDR(object, JSRegExp::kPropertiesOffset), 1328 SLOT_ADDR(object, JSRegExp::kPropertiesOffset),
944 SLOT_ADDR(object, last_property_offset)); 1329 SLOT_ADDR(object, last_property_offset));
945 } 1330 }
946 1331
947 1332
948 static void VisitSharedFunctionInfoFields(Heap* heap, 1333 static void VisitSharedFunctionInfoFields(Heap* heap,
949 HeapObject* object, 1334 HeapObject* object,
950 bool flush_code_candidate) { 1335 bool flush_code_candidate) {
951 VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset)); 1336 VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));
952 1337
(...skipping 35 matching lines...)
988 }; 1373 };
989 1374
990 1375
991 class CodeMarkingVisitor : public ThreadVisitor { 1376 class CodeMarkingVisitor : public ThreadVisitor {
992 public: 1377 public:
993 explicit CodeMarkingVisitor(MarkCompactCollector* collector) 1378 explicit CodeMarkingVisitor(MarkCompactCollector* collector)
994 : collector_(collector) {} 1379 : collector_(collector) {}
995 1380
996 void VisitThread(Isolate* isolate, ThreadLocalTop* top) { 1381 void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
997 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) { 1382 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
998 collector_->MarkObject(it.frame()->unchecked_code()); 1383 Code* code = it.frame()->unchecked_code();
1384 MarkBit code_bit = Marking::MarkBitFrom(code);
1385 collector_->MarkObject(it.frame()->unchecked_code(), code_bit);
999 } 1386 }
1000 } 1387 }
1001 1388
1002 private: 1389 private:
1003 MarkCompactCollector* collector_; 1390 MarkCompactCollector* collector_;
1004 }; 1391 };
1005 1392
1006 1393
1007 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor { 1394 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1008 public: 1395 public:
1009 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector) 1396 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1010 : collector_(collector) {} 1397 : collector_(collector) {}
1011 1398
1012 void VisitPointers(Object** start, Object** end) { 1399 void VisitPointers(Object** start, Object** end) {
1013 for (Object** p = start; p < end; p++) VisitPointer(p); 1400 for (Object** p = start; p < end; p++) VisitPointer(p);
1014 } 1401 }
1015 1402
1016 void VisitPointer(Object** slot) { 1403 void VisitPointer(Object** slot) {
1017 Object* obj = *slot; 1404 Object* obj = *slot;
1018 if (obj->IsSharedFunctionInfo()) { 1405 if (obj->IsSharedFunctionInfo()) {
1019 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj); 1406 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1020 collector_->MarkObject(shared->unchecked_code()); 1407 MarkBit shared_mark = Marking::MarkBitFrom(shared);
1021 collector_->MarkObject(shared); 1408 MarkBit code_mark = Marking::MarkBitFrom(shared->unchecked_code());
1409 collector_->MarkObject(shared->unchecked_code(), code_mark);
1410 collector_->MarkObject(shared, shared_mark);
1022 } 1411 }
1023 } 1412 }
1024 1413
1025 private: 1414 private:
1026 MarkCompactCollector* collector_; 1415 MarkCompactCollector* collector_;
1027 }; 1416 };
1028 1417
1029 1418
1030 void MarkCompactCollector::PrepareForCodeFlushing() { 1419 void MarkCompactCollector::PrepareForCodeFlushing() {
1031 ASSERT(heap() == Isolate::Current()->heap()); 1420 ASSERT(heap() == Isolate::Current()->heap());
1032 1421
1033 if (!FLAG_flush_code) { 1422 // TODO(1609) Currently incremental marker does not support code flushing.
1423 if (!FLAG_flush_code || was_marked_incrementally_) {
1034 EnableCodeFlushing(false); 1424 EnableCodeFlushing(false);
1035 return; 1425 return;
1036 } 1426 }
1037 1427
1038 #ifdef ENABLE_DEBUGGER_SUPPORT 1428 #ifdef ENABLE_DEBUGGER_SUPPORT
1039 if (heap()->isolate()->debug()->IsLoaded() || 1429 if (heap()->isolate()->debug()->IsLoaded() ||
1040 heap()->isolate()->debug()->has_break_points()) { 1430 heap()->isolate()->debug()->has_break_points()) {
1041 EnableCodeFlushing(false); 1431 EnableCodeFlushing(false);
1042 return; 1432 return;
1043 } 1433 }
1044 #endif 1434 #endif
1435
1045 EnableCodeFlushing(true); 1436 EnableCodeFlushing(true);
1046 1437
1047 // Ensure that empty descriptor array is marked. Method MarkDescriptorArray 1438 // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
1048 // relies on it being marked before any other descriptor array. 1439 // relies on it being marked before any other descriptor array.
1049 MarkObject(heap()->raw_unchecked_empty_descriptor_array()); 1440 HeapObject* descriptor_array = heap()->empty_descriptor_array();
1441 MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1442 MarkObject(descriptor_array, descriptor_array_mark);
1050 1443
1051 // Make sure we are not referencing the code from the stack. 1444 // Make sure we are not referencing the code from the stack.
1052 ASSERT(this == heap()->mark_compact_collector()); 1445 ASSERT(this == heap()->mark_compact_collector());
1053 for (StackFrameIterator it; !it.done(); it.Advance()) { 1446 for (StackFrameIterator it; !it.done(); it.Advance()) {
1054 MarkObject(it.frame()->unchecked_code()); 1447 Code* code = it.frame()->unchecked_code();
1448 MarkBit code_mark = Marking::MarkBitFrom(code);
1449 MarkObject(code, code_mark);
1055 } 1450 }
1056 1451
1057 // Iterate the archived stacks in all threads to check if 1452 // Iterate the archived stacks in all threads to check if
1058 // the code is referenced. 1453 // the code is referenced.
1059 CodeMarkingVisitor code_marking_visitor(this); 1454 CodeMarkingVisitor code_marking_visitor(this);
1060 heap()->isolate()->thread_manager()->IterateArchivedThreads( 1455 heap()->isolate()->thread_manager()->IterateArchivedThreads(
1061 &code_marking_visitor); 1456 &code_marking_visitor);
1062 1457
1063 SharedFunctionInfoMarkingVisitor visitor(this); 1458 SharedFunctionInfoMarkingVisitor visitor(this);
1064 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor); 1459 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1065 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor); 1460 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1066 1461
1067 ProcessMarkingStack(); 1462 ProcessMarkingDeque();
1068 } 1463 }
1069 1464
1070 1465
1071 // Visitor class for marking heap roots. 1466 // Visitor class for marking heap roots.
1072 class RootMarkingVisitor : public ObjectVisitor { 1467 class RootMarkingVisitor : public ObjectVisitor {
1073 public: 1468 public:
1074 explicit RootMarkingVisitor(Heap* heap) 1469 explicit RootMarkingVisitor(Heap* heap)
1075 : collector_(heap->mark_compact_collector()) { } 1470 : collector_(heap->mark_compact_collector()) { }
1076 1471
1077 void VisitPointer(Object** p) { 1472 void VisitPointer(Object** p) {
1078 MarkObjectByPointer(p); 1473 MarkObjectByPointer(p);
1079 } 1474 }
1080 1475
1081 void VisitPointers(Object** start, Object** end) { 1476 void VisitPointers(Object** start, Object** end) {
1082 for (Object** p = start; p < end; p++) MarkObjectByPointer(p); 1477 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1083 } 1478 }
1084 1479
1085 private: 1480 private:
1086 void MarkObjectByPointer(Object** p) { 1481 void MarkObjectByPointer(Object** p) {
1087 if (!(*p)->IsHeapObject()) return; 1482 if (!(*p)->IsHeapObject()) return;
1088 1483
1089 // Replace flat cons strings in place. 1484 // Replace flat cons strings in place.
1090 HeapObject* object = ShortCircuitConsString(p); 1485 HeapObject* object = ShortCircuitConsString(p);
1091 if (object->IsMarked()) return; 1486 MarkBit mark_bit = Marking::MarkBitFrom(object);
1487 if (mark_bit.Get()) return;
1092 1488
1093 Map* map = object->map(); 1489 Map* map = object->map();
1094 // Mark the object. 1490 // Mark the object.
1095 collector_->SetMark(object); 1491 collector_->SetMark(object, mark_bit);
1096 1492
1097 // Mark the map pointer and body, and push them on the marking stack. 1493 // Mark the map pointer and body, and push them on the marking stack.
1098 collector_->MarkObject(map); 1494 MarkBit map_mark = Marking::MarkBitFrom(map);
1495 collector_->MarkObject(map, map_mark);
1099 StaticMarkingVisitor::IterateBody(map, object); 1496 StaticMarkingVisitor::IterateBody(map, object);
1100 1497
1101 // Mark all the objects reachable from the map and body. May leave 1498 // Mark all the objects reachable from the map and body. May leave
1102 // overflowed objects in the heap. 1499 // overflowed objects in the heap.
1103 collector_->EmptyMarkingStack(); 1500 collector_->EmptyMarkingDeque();
1104 } 1501 }
1105 1502
1106 MarkCompactCollector* collector_; 1503 MarkCompactCollector* collector_;
1107 }; 1504 };
1108 1505
1109 1506
1110 // Helper class for pruning the symbol table. 1507 // Helper class for pruning the symbol table.
1111 class SymbolTableCleaner : public ObjectVisitor { 1508 class SymbolTableCleaner : public ObjectVisitor {
1112 public: 1509 public:
1113 explicit SymbolTableCleaner(Heap* heap) 1510 explicit SymbolTableCleaner(Heap* heap)
1114 : heap_(heap), pointers_removed_(0) { } 1511 : heap_(heap), pointers_removed_(0) { }
1115 1512
1116 virtual void VisitPointers(Object** start, Object** end) { 1513 virtual void VisitPointers(Object** start, Object** end) {
1117 // Visit all HeapObject pointers in [start, end). 1514 // Visit all HeapObject pointers in [start, end).
1118 for (Object** p = start; p < end; p++) { 1515 for (Object** p = start; p < end; p++) {
1119 if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) { 1516 Object* o = *p;
1517 if (o->IsHeapObject() &&
1518 !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
1120 // Check if the symbol being pruned is an external symbol. We need to 1519 // Check if the symbol being pruned is an external symbol. We need to
1121 // delete the associated external data as this symbol is going away. 1520 // delete the associated external data as this symbol is going away.
1122 1521
1123 // Since no objects have yet been moved we can safely access the map of 1522 // Since no objects have yet been moved we can safely access the map of
1124 // the object. 1523 // the object.
1125 if ((*p)->IsExternalString()) { 1524 if (o->IsExternalString()) {
1126 heap_->FinalizeExternalString(String::cast(*p)); 1525 heap_->FinalizeExternalString(String::cast(*p));
1127 } 1526 }
1128 // Set the entry to null_value (as deleted). 1527 // Set the entry to null_value (as deleted).
1129 *p = heap_->raw_unchecked_null_value(); 1528 *p = heap_->null_value();
1130 pointers_removed_++; 1529 pointers_removed_++;
1131 } 1530 }
1132 } 1531 }
1133 } 1532 }
1134 1533
1135 int PointersRemoved() { 1534 int PointersRemoved() {
1136 return pointers_removed_; 1535 return pointers_removed_;
1137 } 1536 }
1138 1537
1139 private: 1538 private:
1140 Heap* heap_; 1539 Heap* heap_;
1141 int pointers_removed_; 1540 int pointers_removed_;
1142 }; 1541 };
1143 1542
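The cleaner above prunes the weak symbol table: any entry whose referent did not receive a mark bit is dead, external entries additionally need their off-heap data finalized, and the slot is overwritten with a deletion sentinel. A minimal sketch of that pass, with all types and names invented for illustration rather than taken from V8:

#include <cstdio>
#include <vector>

struct Symbol {
  const char* chars;
  bool marked;       // set by the marker if the symbol is otherwise reachable
  bool is_external;  // owns off-heap data that must be finalized
};

// Returns the number of slots that were cleared.
int PruneSymbolTable(std::vector<Symbol*>* table, Symbol* deleted_sentinel) {
  int removed = 0;
  for (Symbol*& slot : *table) {
    if (slot == nullptr || slot == deleted_sentinel || slot->marked) continue;
    if (slot->is_external) {
      // Release external data before the symbol goes away.
      printf("finalizing external symbol '%s'\n", slot->chars);
    }
    slot = deleted_sentinel;  // the entry now reads as deleted
    ++removed;
  }
  return removed;
}

int main() {
  Symbol deleted = {"<deleted>", true, false};
  Symbol live = {"live", true, false};
  Symbol dead = {"dead", false, true};
  std::vector<Symbol*> table = {&live, &dead, nullptr};
  printf("%d symbol(s) removed\n", PruneSymbolTable(&table, &deleted));
}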
1144 1543
1145 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects 1544 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1146 // are retained. 1545 // are retained.
1147 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { 1546 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1148 public: 1547 public:
1149 virtual Object* RetainAs(Object* object) { 1548 virtual Object* RetainAs(Object* object) {
1150 MapWord first_word = HeapObject::cast(object)->map_word(); 1549 if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
1151 if (first_word.IsMarked()) {
1152 return object; 1550 return object;
1153 } else { 1551 } else {
1154 return NULL; 1552 return NULL;
1155 } 1553 }
1156 } 1554 }
1157 }; 1555 };
1158 1556
1159 1557
1160 void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) { 1558 void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
1161 ASSERT(!object->IsMarked()); 1559 ASSERT(IsMarked(object));
1162 ASSERT(HEAP->Contains(object)); 1560 ASSERT(HEAP->Contains(object));
1163 if (object->IsMap()) { 1561 if (object->IsMap()) {
1164 Map* map = Map::cast(object); 1562 Map* map = Map::cast(object);
1165 if (FLAG_cleanup_code_caches_at_gc) { 1563 if (FLAG_cleanup_code_caches_at_gc) {
1166 map->ClearCodeCache(heap()); 1564 map->ClearCodeCache(heap());
1167 } 1565 }
1168 SetMark(map);
1169 1566
1170 // When map collection is enabled we have to mark through map's transitions 1567 // When map collection is enabled we have to mark through map's transitions
1171 // in a special way to make transition links weak. 1568 // in a special way to make transition links weak.
1172 // Only maps for subclasses of JSReceiver can have transitions. 1569 // Only maps for subclasses of JSReceiver can have transitions.
1173 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); 1570 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
1174 if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { 1571 if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
1175 MarkMapContents(map); 1572 MarkMapContents(map);
1176 } else { 1573 } else {
1177 marking_stack_.Push(map); 1574 marking_deque_.PushBlack(map);
1178 } 1575 }
1179 } else { 1576 } else {
1180 SetMark(object); 1577 marking_deque_.PushBlack(object);
1181 marking_stack_.Push(object);
1182 } 1578 }
1183 } 1579 }
1184 1580
1185 1581
1186 void MarkCompactCollector::MarkMapContents(Map* map) { 1582 void MarkCompactCollector::MarkMapContents(Map* map) {
1187 // Mark prototype transitions array but don't push it into marking stack. 1583 // Mark prototype transitions array but don't push it into marking stack.
1188 // This will make references from it weak. We will clean dead prototype 1584 // This will make references from it weak. We will clean dead prototype
1189 // transitions in ClearNonLiveTransitions. 1585 // transitions in ClearNonLiveTransitions.
1190 FixedArray* prototype_transitions = map->unchecked_prototype_transitions(); 1586 FixedArray* prototype_transitions = map->prototype_transitions();
1191 if (!prototype_transitions->IsMarked()) SetMark(prototype_transitions); 1587 MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
1588 if (!mark.Get()) {
1589 mark.Set();
1590 MemoryChunk::IncrementLiveBytes(prototype_transitions->address(),
1591 prototype_transitions->Size());
1592 }
1192 1593
1193 Object* raw_descriptor_array = 1594 Object** raw_descriptor_array_slot =
1194 *HeapObject::RawField(map, 1595 HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
1195 Map::kInstanceDescriptorsOrBitField3Offset); 1596 Object* raw_descriptor_array = *raw_descriptor_array_slot;
1196 if (!raw_descriptor_array->IsSmi()) { 1597 if (!raw_descriptor_array->IsSmi()) {
1197 MarkDescriptorArray( 1598 MarkDescriptorArray(
1198 reinterpret_cast<DescriptorArray*>(raw_descriptor_array)); 1599 reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
1199 } 1600 }
1200 1601
1201 // Mark the Object* fields of the Map. 1602 // Mark the Object* fields of the Map.
1202 // Since the descriptor array has been marked already, it is fine 1603 // Since the descriptor array has been marked already, it is fine
1203 // that one of these fields contains a pointer to it. 1604 // that one of these fields contains a pointer to it.
1204 Object** start_slot = HeapObject::RawField(map, 1605 Object** start_slot = HeapObject::RawField(map,
1205 Map::kPointerFieldsBeginOffset); 1606 Map::kPointerFieldsBeginOffset);
1206 1607
1207 Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset); 1608 Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
1208 1609
1209 StaticMarkingVisitor::VisitPointers(map->heap(), start_slot, end_slot); 1610 StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot);
1210 } 1611 }
1211 1612
1212 1613
1213 void MarkCompactCollector::MarkDescriptorArray( 1614 void MarkCompactCollector::MarkDescriptorArray(
1214 DescriptorArray* descriptors) { 1615 DescriptorArray* descriptors) {
1215 if (descriptors->IsMarked()) return; 1616 MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
1617 if (descriptors_mark.Get()) return;
1216 // Empty descriptor array is marked as a root before any maps are marked. 1618 // Empty descriptor array is marked as a root before any maps are marked.
1217 ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array()); 1619 ASSERT(descriptors != heap()->empty_descriptor_array());
1218 SetMark(descriptors); 1620 SetMark(descriptors, descriptors_mark);
1219 1621
1220 FixedArray* contents = reinterpret_cast<FixedArray*>( 1622 FixedArray* contents = reinterpret_cast<FixedArray*>(
1221 descriptors->get(DescriptorArray::kContentArrayIndex)); 1623 descriptors->get(DescriptorArray::kContentArrayIndex));
1222 ASSERT(contents->IsHeapObject()); 1624 ASSERT(contents->IsHeapObject());
1223 ASSERT(!contents->IsMarked()); 1625 ASSERT(!IsMarked(contents));
1224 ASSERT(contents->IsFixedArray()); 1626 ASSERT(contents->IsFixedArray());
1225 ASSERT(contents->length() >= 2); 1627 ASSERT(contents->length() >= 2);
1226 SetMark(contents); 1628 MarkBit contents_mark = Marking::MarkBitFrom(contents);
1629 SetMark(contents, contents_mark);
1227 // Contents contains (value, details) pairs. If the details say that the type 1630 // Contents contains (value, details) pairs. If the details say that the type
1228 // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, 1631 // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
1229 // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as 1632 // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
1230 // live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and 1633 // live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
1231 // CONSTANT_TRANSITION is the value an Object* (a Map*). 1634 // CONSTANT_TRANSITION is the value an Object* (a Map*).
1232 for (int i = 0; i < contents->length(); i += 2) { 1635 for (int i = 0; i < contents->length(); i += 2) {
1233 // If the pair (value, details) at index i, i+1 is not 1636 // If the pair (value, details) at index i, i+1 is not
1234 // a transition or null descriptor, mark the value. 1637 // a transition or null descriptor, mark the value.
1235 PropertyDetails details(Smi::cast(contents->get(i + 1))); 1638 PropertyDetails details(Smi::cast(contents->get(i + 1)));
1236 if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) { 1639
1237 HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i)); 1640 Object** slot = contents->data_start() + i;
1238 if (object->IsHeapObject() && !object->IsMarked()) { 1641 Object* value = *slot;
1239 SetMark(object); 1642 if (!value->IsHeapObject()) continue;
1240 marking_stack_.Push(object); 1643
1644 RecordSlot(slot, slot, *slot);
1645
1646 PropertyType type = details.type();
1647 if (type < FIRST_PHANTOM_PROPERTY_TYPE) {
1648 HeapObject* object = HeapObject::cast(value);
1649 MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object));
1650 if (!mark.Get()) {
1651 SetMark(HeapObject::cast(object), mark);
1652 marking_deque_.PushBlack(object);
1653 }
1654 } else if (type == ELEMENTS_TRANSITION && value->IsFixedArray()) {
1655 // For maps with multiple elements transitions, the transition maps are
1656 // stored in a FixedArray. Keep the fixed array alive but not the maps
1657 // that it refers to.
1658 HeapObject* object = HeapObject::cast(value);
1659 MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object));
1660 if (!mark.Get()) {
1661 SetMark(HeapObject::cast(object), mark);
1241 } 1662 }
1242 } 1663 }
1243 } 1664 }
1244 // The DescriptorArray descriptors contains a pointer to its contents array, 1665 // The DescriptorArray descriptors contains a pointer to its contents array,
1245 // but the contents array is already marked. 1666 // but the contents array is already marked.
1246 marking_stack_.Push(descriptors); 1667 marking_deque_.PushBlack(descriptors);
1247 } 1668 }
1248 1669
1249 1670
1250 void MarkCompactCollector::CreateBackPointers() { 1671 void MarkCompactCollector::CreateBackPointers() {
1251 HeapObjectIterator iterator(heap()->map_space()); 1672 HeapObjectIterator iterator(heap()->map_space());
1252 for (HeapObject* next_object = iterator.next(); 1673 for (HeapObject* next_object = iterator.Next();
1253 next_object != NULL; next_object = iterator.next()) { 1674 next_object != NULL; next_object = iterator.Next()) {
1254 if (next_object->IsMap()) { // Could also be ByteArray on free list. 1675 if (next_object->IsMap()) { // Could also be FreeSpace object on free list.
1255 Map* map = Map::cast(next_object); 1676 Map* map = Map::cast(next_object);
1256 STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); 1677 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
1257 if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { 1678 if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
1258 map->CreateBackPointers(); 1679 map->CreateBackPointers();
1259 } else { 1680 } else {
1260 ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array()); 1681 ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
1261 } 1682 }
1262 } 1683 }
1263 } 1684 }
1264 } 1685 }
1265 1686
1266 1687
1267 static int OverflowObjectSize(HeapObject* obj) { 1688 // Fill the marking stack with overflowed objects returned by the given
1268 // Recover the normal map pointer, it might be marked as live and 1689 // iterator. Stop when the marking stack is filled or the end of the space
1269 // overflowed. 1690 // is reached, whichever comes first.
1270 MapWord map_word = obj->map_word(); 1691 template<class T>
1271 map_word.ClearMark(); 1692 static void DiscoverGreyObjectsWithIterator(Heap* heap,
1272 map_word.ClearOverflow(); 1693 MarkingDeque* marking_deque,
1273 return obj->SizeFromMap(map_word.ToMap()); 1694 T* it) {
1695 // The caller should ensure that the marking stack is initially not full,
1696 // so that we don't waste effort pointlessly scanning for objects.
1697 ASSERT(!marking_deque->IsFull());
1698
1699 Map* filler_map = heap->one_pointer_filler_map();
1700 for (HeapObject* object = it->Next();
1701 object != NULL;
1702 object = it->Next()) {
1703 MarkBit markbit = Marking::MarkBitFrom(object);
1704 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1705 Marking::GreyToBlack(markbit);
1706 MemoryChunk::IncrementLiveBytes(object->address(), object->Size());
1707 marking_deque->PushBlack(object);
1708 if (marking_deque->IsFull()) return;
1709 }
1710 }
1274 } 1711 }
1275 1712
1276 1713
1277 class OverflowedObjectsScanner : public AllStatic { 1714 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
1278 public:
1279 // Fill the marking stack with overflowed objects returned by the given
1280 // iterator. Stop when the marking stack is filled or the end of the space
1281 // is reached, whichever comes first.
1282 template<class T>
1283 static inline void ScanOverflowedObjects(MarkCompactCollector* collector,
1284 T* it) {
1285 // The caller should ensure that the marking stack is initially not full,
1286 // so that we don't waste effort pointlessly scanning for objects.
1287 ASSERT(!collector->marking_stack_.is_full());
1288 1715
1289 for (HeapObject* object = it->next(); object != NULL; object = it->next()) { 1716
1290 if (object->IsOverflowed()) { 1717 static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
1291 object->ClearOverflow(); 1718 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1292 ASSERT(object->IsMarked()); 1719 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
1293 ASSERT(HEAP->Contains(object)); 1720 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
1294 collector->marking_stack_.Push(object); 1721 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1295 if (collector->marking_stack_.is_full()) return; 1722
1296 } 1723 MarkBit::CellType* cells = p->markbits()->cells();
1724
1725 int last_cell_index =
1726 Bitmap::IndexToCell(
1727 Bitmap::CellAlignIndex(
1728 p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
1729
1730 int cell_index = Page::kFirstUsedCell;
1731 Address cell_base = p->ObjectAreaStart();
1732
1733 for (cell_index = Page::kFirstUsedCell;
1734 cell_index < last_cell_index;
1735 cell_index++, cell_base += 32 * kPointerSize) {
1736 ASSERT((unsigned)cell_index ==
1737 Bitmap::IndexToCell(
1738 Bitmap::CellAlignIndex(
1739 p->AddressToMarkbitIndex(cell_base))));
1740
1741 const MarkBit::CellType current_cell = cells[cell_index];
1742 if (current_cell == 0) continue;
1743
1744 const MarkBit::CellType next_cell = cells[cell_index + 1];
1745 MarkBit::CellType grey_objects = current_cell &
1746 ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
1747
1748 int offset = 0;
1749 while (grey_objects != 0) {
1750 int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
1751 grey_objects >>= trailing_zeros;
1752 offset += trailing_zeros;
1753 MarkBit markbit(&cells[cell_index], 1 << offset, false);
1754 ASSERT(Marking::IsGrey(markbit));
1755 Marking::GreyToBlack(markbit);
1756 Address addr = cell_base + offset * kPointerSize;
1757 HeapObject* object = HeapObject::FromAddress(addr);
1758 MemoryChunk::IncrementLiveBytes(object->address(), object->Size());
1759 marking_deque->PushBlack(object);
1760 if (marking_deque->IsFull()) return;
1761 offset += 2;
1762 grey_objects >>= 2;
1763 }
1764
1765 grey_objects >>= (Bitmap::kBitsPerCell - 1);
1766 }
1767 }
1768
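The bit manipulation in DiscoverGreyObjectsOnPage relies on the two-bit color encoding asserted at its top: each object start owns two consecutive mark bits, white is 00, black is 10 and grey is 11, so bit i starts a grey object exactly when bit i and bit i+1 are both set, where bit i+1 may live in the next bitmap cell. A small standalone sketch of that extraction, assuming 32-bit cells and the GCC/Clang __builtin_ctz intrinsic (the helper name is illustrative, not V8's):

#include <cstdint>
#include <cstdio>

// Bit i of the result is set iff bit i of `cell` starts a grey object,
// i.e. both bit i and bit i+1 are set (bit i+1 may spill into `next_cell`).
uint32_t GreyObjectStarts(uint32_t cell, uint32_t next_cell) {
  uint32_t second_bits = (cell >> 1) | (next_cell << 31);
  return cell & second_bits;
}

int main() {
  uint32_t cell = 0, next_cell = 0;
  cell |= 1u << 0;                // offset 0: black ("10"), not reported
  cell |= (1u << 4) | (1u << 5);  // offset 4: grey ("11")
  cell |= 1u << 31;               // offset 31: grey, second bit in next cell
  next_cell |= 1u << 0;

  for (uint32_t grey = GreyObjectStarts(cell, next_cell); grey != 0;
       grey &= grey - 1) {  // clear the lowest set bit each iteration
    printf("grey object starts at word offset %d\n", __builtin_ctz(grey));
  }
}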
1769
1770 static void DiscoverGreyObjectsInSpace(Heap* heap,
1771 MarkingDeque* marking_deque,
1772 PagedSpace* space) {
1773 if (!space->was_swept_conservatively()) {
1774 HeapObjectIterator it(space);
1775 DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
1776 } else {
1777 PageIterator it(space);
1778 while (it.has_next()) {
1779 Page* p = it.next();
1780 DiscoverGreyObjectsOnPage(marking_deque, p);
1781 if (marking_deque->IsFull()) return;
1297 } 1782 }
1298 } 1783 }
1299 }; 1784 }
1300 1785
1301 1786
1302 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) { 1787 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1303 return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked(); 1788 Object* o = *p;
1789 if (!o->IsHeapObject()) return false;
1790 HeapObject* heap_object = HeapObject::cast(o);
1791 MarkBit mark = Marking::MarkBitFrom(heap_object);
1792 return !mark.Get();
1304 } 1793 }
1305 1794
1306 1795
1307 void MarkCompactCollector::MarkSymbolTable() { 1796 void MarkCompactCollector::MarkSymbolTable() {
1308 SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table(); 1797 SymbolTable* symbol_table = heap()->symbol_table();
1309 // Mark the symbol table itself. 1798 // Mark the symbol table itself.
1310 SetMark(symbol_table); 1799 MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table);
1800 SetMark(symbol_table, symbol_table_mark);
1311 // Explicitly mark the prefix. 1801 // Explicitly mark the prefix.
1312 MarkingVisitor marker(heap()); 1802 MarkingVisitor marker(heap());
1313 symbol_table->IteratePrefix(&marker); 1803 symbol_table->IteratePrefix(&marker);
1314 ProcessMarkingStack(); 1804 ProcessMarkingDeque();
1315 } 1805 }
1316 1806
1317 1807
1318 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) { 1808 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
1319 // Mark the heap roots including global variables, stack variables, 1809 // Mark the heap roots including global variables, stack variables,
1320 // etc., and all objects reachable from them. 1810 // etc., and all objects reachable from them.
1321 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG); 1811 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
1322 1812
1323 // Handle the symbol table specially. 1813 // Handle the symbol table specially.
1324 MarkSymbolTable(); 1814 MarkSymbolTable();
1325 1815
1326 // There may be overflowed objects in the heap. Visit them now. 1816 // There may be overflowed objects in the heap. Visit them now.
1327 while (marking_stack_.overflowed()) { 1817 while (marking_deque_.overflowed()) {
1328 RefillMarkingStack(); 1818 RefillMarkingDeque();
1329 EmptyMarkingStack(); 1819 EmptyMarkingDeque();
1330 } 1820 }
1331 } 1821 }
1332 1822
1333 1823
1334 void MarkCompactCollector::MarkObjectGroups() { 1824 void MarkCompactCollector::MarkObjectGroups() {
1335 List<ObjectGroup*>* object_groups = 1825 List<ObjectGroup*>* object_groups =
1336 heap()->isolate()->global_handles()->object_groups(); 1826 heap()->isolate()->global_handles()->object_groups();
1337 1827
1338 int last = 0; 1828 int last = 0;
1339 for (int i = 0; i < object_groups->length(); i++) { 1829 for (int i = 0; i < object_groups->length(); i++) {
1340 ObjectGroup* entry = object_groups->at(i); 1830 ObjectGroup* entry = object_groups->at(i);
1341 ASSERT(entry != NULL); 1831 ASSERT(entry != NULL);
1342 1832
1343 Object*** objects = entry->objects_; 1833 Object*** objects = entry->objects_;
1344 bool group_marked = false; 1834 bool group_marked = false;
1345 for (size_t j = 0; j < entry->length_; j++) { 1835 for (size_t j = 0; j < entry->length_; j++) {
1346 Object* object = *objects[j]; 1836 Object* object = *objects[j];
1347 if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) { 1837 if (object->IsHeapObject()) {
1348 group_marked = true; 1838 HeapObject* heap_object = HeapObject::cast(object);
1349 break; 1839 MarkBit mark = Marking::MarkBitFrom(heap_object);
1840 if (mark.Get()) {
1841 group_marked = true;
1842 break;
1843 }
1350 } 1844 }
1351 } 1845 }
1352 1846
1353 if (!group_marked) { 1847 if (!group_marked) {
1354 (*object_groups)[last++] = entry; 1848 (*object_groups)[last++] = entry;
1355 continue; 1849 continue;
1356 } 1850 }
1357 1851
1358 // An object in the group is marked, so mark all heap objects in 1852 // An object in the group is marked, so mark as grey all white heap
1359 // the group. 1853 // objects in the group.
1360 for (size_t j = 0; j < entry->length_; ++j) { 1854 for (size_t j = 0; j < entry->length_; ++j) {
1361 if ((*objects[j])->IsHeapObject()) { 1855 Object* object = *objects[j];
1362 MarkObject(HeapObject::cast(*objects[j])); 1856 if (object->IsHeapObject()) {
1857 HeapObject* heap_object = HeapObject::cast(object);
1858 MarkBit mark = Marking::MarkBitFrom(heap_object);
1859 MarkObject(heap_object, mark);
1363 } 1860 }
1364 } 1861 }
1365 1862
1366 // Once the entire group has been marked, dispose it because it's 1863 // Once the entire group has been colored grey, set the object group
1367 // not needed anymore. 1864 // to NULL so it won't be processed again.
1368 entry->Dispose(); 1865 entry->Dispose();
1866 object_groups->at(i) = NULL;
1369 } 1867 }
1370 object_groups->Rewind(last); 1868 object_groups->Rewind(last);
1371 } 1869 }
1372 1870
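The rule in MarkObjectGroups is: if any member of a group is already marked, every member of the group is marked as well and the handled group is dropped; groups with no marked member are compacted to the front of the list and retried on a later pass, since further marking may still reach them. A small sketch of that rule on a toy heap; the types and names below are illustrative, not V8's:

#include <cstdio>
#include <vector>

struct Obj { bool marked; };
typedef std::vector<Obj*> Group;

// Marks every member of each group that already has a marked member;
// untouched groups are compacted to the front and kept for a later pass.
void MarkObjectGroups(std::vector<Group>* groups) {
  size_t last = 0;
  for (size_t i = 0; i < groups->size(); i++) {
    Group& group = (*groups)[i];
    bool group_marked = false;
    for (Obj* o : group) {
      if (o->marked) { group_marked = true; break; }
    }
    if (!group_marked) {
      (*groups)[last++] = group;  // keep: nothing in it is live yet
      continue;
    }
    for (Obj* o : group) o->marked = true;  // mark the whole group
  }
  groups->resize(last);  // analogous to Rewind(last): drop handled groups
}

int main() {
  Obj a = {true}, b = {false}, c = {false}, d = {false};
  std::vector<Group> groups = { {&a, &b}, {&c, &d} };
  MarkObjectGroups(&groups);
  printf("b marked: %d, groups kept for retry: %zu\n",
         (int)b.marked, groups.size());  // b marked: 1, groups kept: 1
}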
1373 1871
1374 void MarkCompactCollector::MarkImplicitRefGroups() { 1872 void MarkCompactCollector::MarkImplicitRefGroups() {
1375 List<ImplicitRefGroup*>* ref_groups = 1873 List<ImplicitRefGroup*>* ref_groups =
1376 heap()->isolate()->global_handles()->implicit_ref_groups(); 1874 heap()->isolate()->global_handles()->implicit_ref_groups();
1377 1875
1378 int last = 0; 1876 int last = 0;
1379 for (int i = 0; i < ref_groups->length(); i++) { 1877 for (int i = 0; i < ref_groups->length(); i++) {
1380 ImplicitRefGroup* entry = ref_groups->at(i); 1878 ImplicitRefGroup* entry = ref_groups->at(i);
1381 ASSERT(entry != NULL); 1879 ASSERT(entry != NULL);
1382 1880
1383 if (!(*entry->parent_)->IsMarked()) { 1881 if (!IsMarked(*entry->parent_)) {
1384 (*ref_groups)[last++] = entry; 1882 (*ref_groups)[last++] = entry;
1385 continue; 1883 continue;
1386 } 1884 }
1387 1885
1388 Object*** children = entry->children_; 1886 Object*** children = entry->children_;
1389 // A parent object is marked, so mark all child heap objects. 1887 // A parent object is marked, so mark all child heap objects.
1390 for (size_t j = 0; j < entry->length_; ++j) { 1888 for (size_t j = 0; j < entry->length_; ++j) {
1391 if ((*children[j])->IsHeapObject()) { 1889 if ((*children[j])->IsHeapObject()) {
1392 MarkObject(HeapObject::cast(*children[j])); 1890 HeapObject* child = HeapObject::cast(*children[j]);
1891 MarkBit mark = Marking::MarkBitFrom(child);
1892 MarkObject(child, mark);
1393 } 1893 }
1394 } 1894 }
1395 1895
1396 // Once the entire group has been marked, dispose it because it's 1896 // Once the entire group has been marked, dispose it because it's
1397 // not needed anymore. 1897 // not needed anymore.
1398 entry->Dispose(); 1898 entry->Dispose();
1399 } 1899 }
1400 ref_groups->Rewind(last); 1900 ref_groups->Rewind(last);
1401 } 1901 }
1402 1902
1403 1903
1404 // Mark all objects reachable from the objects on the marking stack. 1904 // Mark all objects reachable from the objects on the marking stack.
1405 // Before: the marking stack contains zero or more heap object pointers. 1905 // Before: the marking stack contains zero or more heap object pointers.
1406 // After: the marking stack is empty, and all objects reachable from the 1906 // After: the marking stack is empty, and all objects reachable from the
1407 // marking stack have been marked, or are overflowed in the heap. 1907 // marking stack have been marked, or are overflowed in the heap.
1408 void MarkCompactCollector::EmptyMarkingStack() { 1908 void MarkCompactCollector::EmptyMarkingDeque() {
1409 while (!marking_stack_.is_empty()) { 1909 while (!marking_deque_.IsEmpty()) {
1410 while (!marking_stack_.is_empty()) { 1910 while (!marking_deque_.IsEmpty()) {
1411 HeapObject* object = marking_stack_.Pop(); 1911 HeapObject* object = marking_deque_.Pop();
1412 ASSERT(object->IsHeapObject()); 1912 ASSERT(object->IsHeapObject());
1413 ASSERT(heap()->Contains(object)); 1913 ASSERT(heap()->Contains(object));
1414 ASSERT(object->IsMarked()); 1914 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
1415 ASSERT(!object->IsOverflowed());
1416 1915
1417 // Because the object is marked, we have to recover the original map 1916 Map* map = object->map();
1418 // pointer and use it to mark the object's body. 1917 MarkBit map_mark = Marking::MarkBitFrom(map);
1419 MapWord map_word = object->map_word(); 1918 MarkObject(map, map_mark);
1420 map_word.ClearMark();
1421 Map* map = map_word.ToMap();
1422 MarkObject(map);
1423 1919
1424 StaticMarkingVisitor::IterateBody(map, object); 1920 StaticMarkingVisitor::IterateBody(map, object);
1425 } 1921 }
1426 1922
1427 // Process encountered weak maps, mark objects only reachable by those 1923 // Process encountered weak maps, mark objects only reachable by those
1428 // weak maps and repeat until fix-point is reached. 1924 // weak maps and repeat until fix-point is reached.
1429 ProcessWeakMaps(); 1925 ProcessWeakMaps();
1430 } 1926 }
1431 } 1927 }
1432 1928
1433 1929
1434 // Sweep the heap for overflowed objects, clear their overflow bits, and 1930 // Sweep the heap for overflowed objects, clear their overflow bits, and
1435 // push them on the marking stack. Stop early if the marking stack fills 1931 // push them on the marking stack. Stop early if the marking stack fills
1436 // before sweeping completes. If sweeping completes, there are no remaining 1932 // before sweeping completes. If sweeping completes, there are no remaining
1437 // overflowed objects in the heap so the overflow flag on the marking stack 1933 // overflowed objects in the heap so the overflow flag on the marking stack
1438 // is cleared. 1934 // is cleared.
1439 void MarkCompactCollector::RefillMarkingStack() { 1935 void MarkCompactCollector::RefillMarkingDeque() {
1440 ASSERT(marking_stack_.overflowed()); 1936 ASSERT(marking_deque_.overflowed());
1441 1937
1442 SemiSpaceIterator new_it(heap()->new_space(), &OverflowObjectSize); 1938 SemiSpaceIterator new_it(heap()->new_space());
1443 OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it); 1939 DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it);
1444 if (marking_stack_.is_full()) return; 1940 if (marking_deque_.IsFull()) return;
1445 1941
1446 HeapObjectIterator old_pointer_it(heap()->old_pointer_space(), 1942 DiscoverGreyObjectsInSpace(heap(),
1447 &OverflowObjectSize); 1943 &marking_deque_,
1448 OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it); 1944 heap()->old_pointer_space());
1449 if (marking_stack_.is_full()) return; 1945 if (marking_deque_.IsFull()) return;
1450 1946
1451 HeapObjectIterator old_data_it(heap()->old_data_space(), &OverflowObjectSize); 1947 DiscoverGreyObjectsInSpace(heap(),
1452 OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it); 1948 &marking_deque_,
1453 if (marking_stack_.is_full()) return; 1949 heap()->old_data_space());
1950 if (marking_deque_.IsFull()) return;
1454 1951
1455 HeapObjectIterator code_it(heap()->code_space(), &OverflowObjectSize); 1952 DiscoverGreyObjectsInSpace(heap(),
1456 OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it); 1953 &marking_deque_,
1457 if (marking_stack_.is_full()) return; 1954 heap()->code_space());
1955 if (marking_deque_.IsFull()) return;
1458 1956
1459 HeapObjectIterator map_it(heap()->map_space(), &OverflowObjectSize); 1957 DiscoverGreyObjectsInSpace(heap(),
1460 OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it); 1958 &marking_deque_,
1461 if (marking_stack_.is_full()) return; 1959 heap()->map_space());
1960 if (marking_deque_.IsFull()) return;
1462 1961
1463 HeapObjectIterator cell_it(heap()->cell_space(), &OverflowObjectSize); 1962 DiscoverGreyObjectsInSpace(heap(),
1464 OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it); 1963 &marking_deque_,
1465 if (marking_stack_.is_full()) return; 1964 heap()->cell_space());
1965 if (marking_deque_.IsFull()) return;
1466 1966
1467 LargeObjectIterator lo_it(heap()->lo_space(), &OverflowObjectSize); 1967 LargeObjectIterator lo_it(heap()->lo_space());
1468 OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it); 1968 DiscoverGreyObjectsWithIterator(heap(),
1469 if (marking_stack_.is_full()) return; 1969 &marking_deque_,
1970 &lo_it);
1971 if (marking_deque_.IsFull()) return;
1470 1972
1471 marking_stack_.clear_overflowed(); 1973 marking_deque_.ClearOverflowed();
1472 } 1974 }
1473 1975
1474 1976
1475 // Mark all objects reachable (transitively) from objects on the marking 1977 // Mark all objects reachable (transitively) from objects on the marking
1476 // stack. Before: the marking stack contains zero or more heap object 1978 // stack. Before: the marking stack contains zero or more heap object
1477 // pointers. After: the marking stack is empty and there are no overflowed 1979 // pointers. After: the marking stack is empty and there are no overflowed
1478 // objects in the heap. 1980 // objects in the heap.
1479 void MarkCompactCollector::ProcessMarkingStack() { 1981 void MarkCompactCollector::ProcessMarkingDeque() {
1480 EmptyMarkingStack(); 1982 EmptyMarkingDeque();
1481 while (marking_stack_.overflowed()) { 1983 while (marking_deque_.overflowed()) {
1482 RefillMarkingStack(); 1984 RefillMarkingDeque();
1483 EmptyMarkingStack(); 1985 EmptyMarkingDeque();
1484 } 1986 }
1485 } 1987 }
1486 1988
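Taken together, EmptyMarkingDeque, RefillMarkingDeque and ProcessMarkingDeque implement overflow-tolerant marking: the deque holds objects whose bodies still need scanning, a push into a full deque only sets an overflow flag and leaves the object grey in the heap, and the collector alternates draining the deque with rescanning for leftover grey objects until the flag stays clear. A compact sketch of that protocol on a toy object graph; everything below is simplified and illustrative, not V8 code:

#include <cstdio>
#include <vector>

enum Color { WHITE, GREY, BLACK };

struct Obj {
  Color color = WHITE;
  std::vector<Obj*> children;
};

struct MarkingDeque {
  std::vector<Obj*> stack;
  size_t capacity;
  bool overflowed = false;
  explicit MarkingDeque(size_t cap) : capacity(cap) {}

  // Push an object as black; with no room left, leave it grey in the heap
  // and remember that we overflowed so it can be rediscovered later.
  void PushBlack(Obj* obj) {
    if (stack.size() < capacity) {
      obj->color = BLACK;
      stack.push_back(obj);
    } else {
      obj->color = GREY;
      overflowed = true;
    }
  }
};

// Mark a white object and queue its body for scanning.
void MarkObject(Obj* obj, MarkingDeque* deque) {
  if (obj->color == WHITE) deque->PushBlack(obj);
}

void EmptyMarkingDeque(MarkingDeque* deque) {
  while (!deque->stack.empty()) {
    Obj* obj = deque->stack.back();
    deque->stack.pop_back();
    for (Obj* child : obj->children) MarkObject(child, deque);
  }
}

// Rescan the "heap" for grey objects the deque had no room for. The
// overflow flag is only cleared after a complete scan finds room for all.
void RefillMarkingDeque(MarkingDeque* deque, std::vector<Obj>* heap) {
  for (Obj& obj : *heap) {
    if (obj.color != GREY) continue;
    deque->PushBlack(&obj);                              // grey -> black
    if (deque->stack.size() == deque->capacity) return;  // stop early
  }
  deque->overflowed = false;
}

void ProcessMarkingDeque(MarkingDeque* deque, std::vector<Obj>* heap) {
  EmptyMarkingDeque(deque);
  while (deque->overflowed) {
    RefillMarkingDeque(deque, heap);
    EmptyMarkingDeque(deque);
  }
}

int main() {
  // Toy heap: object 0 is the root and reaches everything else, but the
  // deque is deliberately too small to hold all pending objects at once.
  std::vector<Obj> heap(4);
  heap[0].children = {&heap[1], &heap[2], &heap[3]};
  heap[1].children = {&heap[2]};
  MarkingDeque deque(/* capacity */ 1);
  MarkObject(&heap[0], &deque);
  ProcessMarkingDeque(&deque, &heap);
  for (size_t i = 0; i < heap.size(); ++i) {
    printf("object %zu is %s\n", i, heap[i].color == BLACK ? "black" : "white");
  }
}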
1487 1989
1488 void MarkCompactCollector::ProcessExternalMarking() { 1990 void MarkCompactCollector::ProcessExternalMarking() {
1489 bool work_to_do = true; 1991 bool work_to_do = true;
1490 ASSERT(marking_stack_.is_empty()); 1992 ASSERT(marking_deque_.IsEmpty());
1491 while (work_to_do) { 1993 while (work_to_do) {
1492 MarkObjectGroups(); 1994 MarkObjectGroups();
1493 MarkImplicitRefGroups(); 1995 MarkImplicitRefGroups();
1494 work_to_do = !marking_stack_.is_empty(); 1996 work_to_do = !marking_deque_.IsEmpty();
1495 ProcessMarkingStack(); 1997 ProcessMarkingDeque();
1496 } 1998 }
1497 } 1999 }
1498 2000
1499 2001
1500 void MarkCompactCollector::MarkLiveObjects() { 2002 void MarkCompactCollector::MarkLiveObjects() {
1501 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK); 2003 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
1502 // The recursive GC marker detects when it is nearing stack overflow, 2004 // The recursive GC marker detects when it is nearing stack overflow,
1503 // and switches to a different marking system. JS interrupts interfere 2005 // and switches to a different marking system. JS interrupts interfere
1504 // with the C stack limit check. 2006 // with the C stack limit check.
1505 PostponeInterruptsScope postpone(heap()->isolate()); 2007 PostponeInterruptsScope postpone(heap()->isolate());
1506 2008
2009 bool incremental_marking_overflowed = false;
2010 IncrementalMarking* incremental_marking = heap_->incremental_marking();
2011 if (was_marked_incrementally_) {
2012 // Finalize the incremental marking and check whether we had an overflow.
2013 // Both markers use grey color to mark overflowed objects so
2014 // the non-incremental marker can deal with them as if overflow
2015 // occurred during normal marking.
2016 // But the incremental marker uses a separate marking deque,
2017 // so we have to explicitly copy its overflow state.
2018 incremental_marking->Finalize();
2019 incremental_marking_overflowed =
2020 incremental_marking->marking_deque()->overflowed();
2021 incremental_marking->marking_deque()->ClearOverflowed();
2022 } else {
2023 // Abort any pending incremental activities e.g. incremental sweeping.
2024 incremental_marking->Abort();
2025 }
2026
1507 #ifdef DEBUG 2027 #ifdef DEBUG
1508 ASSERT(state_ == PREPARE_GC); 2028 ASSERT(state_ == PREPARE_GC);
1509 state_ = MARK_LIVE_OBJECTS; 2029 state_ = MARK_LIVE_OBJECTS;
1510 #endif 2030 #endif
1511 // The to space contains live objects; the from space is used as a marking 2031 // The to space contains live objects; a page in from space is used as a
1512 // stack. 2032 // marking stack.
1513 marking_stack_.Initialize(heap()->new_space()->FromSpaceLow(), 2033 Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
1514 heap()->new_space()->FromSpaceHigh()); 2034 Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
2035 if (FLAG_force_marking_deque_overflows) {
2036 marking_deque_end = marking_deque_start + 64 * kPointerSize;
2037 }
2038 marking_deque_.Initialize(marking_deque_start,
2039 marking_deque_end);
2040 ASSERT(!marking_deque_.overflowed());
1515 2041
1516 ASSERT(!marking_stack_.overflowed()); 2042 if (incremental_marking_overflowed) {
2043 // There are overflowed objects left in the heap after incremental marking.
2044 marking_deque_.SetOverflowed();
2045 }
1517 2046
1518 PrepareForCodeFlushing(); 2047 PrepareForCodeFlushing();
1519 2048
1520 RootMarkingVisitor root_visitor(heap()); 2049 RootMarkingVisitor root_visitor(heap());
1521 MarkRoots(&root_visitor); 2050 MarkRoots(&root_visitor);
1522 2051
1523 // The objects reachable from the roots are marked, yet unreachable 2052 // The objects reachable from the roots are marked, yet unreachable
1524 // objects are unmarked. Mark objects reachable due to host 2053 // objects are unmarked. Mark objects reachable due to host
1525 // application specific logic. 2054 // application specific logic.
1526 ProcessExternalMarking(); 2055 ProcessExternalMarking();
1527 2056
1528 // The objects reachable from the roots or object groups are marked, 2057 // The objects reachable from the roots or object groups are marked,
1529 // yet unreachable objects are unmarked. Mark objects reachable 2058 // yet unreachable objects are unmarked. Mark objects reachable
1530 // only from weak global handles. 2059 // only from weak global handles.
1531 // 2060 //
1532 // First we identify nonlive weak handles and mark them as pending 2061 // First we identify nonlive weak handles and mark them as pending
1533 // destruction. 2062 // destruction.
1534 heap()->isolate()->global_handles()->IdentifyWeakHandles( 2063 heap()->isolate()->global_handles()->IdentifyWeakHandles(
1535 &IsUnmarkedHeapObject); 2064 &IsUnmarkedHeapObject);
1536 // Then we mark the objects and process the transitive closure. 2065 // Then we mark the objects and process the transitive closure.
1537 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor); 2066 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
1538 while (marking_stack_.overflowed()) { 2067 while (marking_deque_.overflowed()) {
1539 RefillMarkingStack(); 2068 RefillMarkingDeque();
1540 EmptyMarkingStack(); 2069 EmptyMarkingDeque();
1541 } 2070 }
1542 2071
1543 // Repeat host application specific marking to mark unmarked objects 2072 // Repeat host application specific marking to mark unmarked objects
1544 // reachable from the weak roots. 2073 // reachable from the weak roots.
1545 ProcessExternalMarking(); 2074 ProcessExternalMarking();
1546 2075
2076 AfterMarking();
2077 }
2078
2079
2080 void MarkCompactCollector::AfterMarking() {
1547 // Object literal map caches reference symbols (cache keys) and maps 2081 // Object literal map caches reference symbols (cache keys) and maps
1548 // (cache values). At this point, still-useful maps have already been 2082 // (cache values). At this point, still-useful maps have already been
1549 // marked. Mark the keys for the alive values before we process the 2083 // marked. Mark the keys for the alive values before we process the
1550 // symbol table. 2084 // symbol table.
1551 ProcessMapCaches(); 2085 ProcessMapCaches();
1552 2086
1553 // Prune the symbol table removing all symbols only pointed to by the 2087 // Prune the symbol table removing all symbols only pointed to by the
1554 // symbol table. Cannot use symbol_table() here because the symbol 2088 // symbol table. Cannot use symbol_table() here because the symbol
1555 // table is marked. 2089 // table is marked.
1556 SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table(); 2090 SymbolTable* symbol_table = heap()->symbol_table();
1557 SymbolTableCleaner v(heap()); 2091 SymbolTableCleaner v(heap());
1558 symbol_table->IterateElements(&v); 2092 symbol_table->IterateElements(&v);
1559 symbol_table->ElementsRemoved(v.PointersRemoved()); 2093 symbol_table->ElementsRemoved(v.PointersRemoved());
1560 heap()->external_string_table_.Iterate(&v); 2094 heap()->external_string_table_.Iterate(&v);
1561 heap()->external_string_table_.CleanUp(); 2095 heap()->external_string_table_.CleanUp();
1562 2096
1563 // Process the weak references. 2097 // Process the weak references.
1564 MarkCompactWeakObjectRetainer mark_compact_object_retainer; 2098 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
1565 heap()->ProcessWeakReferences(&mark_compact_object_retainer); 2099 heap()->ProcessWeakReferences(&mark_compact_object_retainer);
1566 2100
1567 // Remove object groups after marking phase. 2101 // Remove object groups after marking phase.
1568 heap()->isolate()->global_handles()->RemoveObjectGroups(); 2102 heap()->isolate()->global_handles()->RemoveObjectGroups();
1569 heap()->isolate()->global_handles()->RemoveImplicitRefGroups(); 2103 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
1570 2104
1571 // Flush code from collected candidates. 2105 // Flush code from collected candidates.
1572 if (is_code_flushing_enabled()) { 2106 if (is_code_flushing_enabled()) {
1573 code_flusher_->ProcessCandidates(); 2107 code_flusher_->ProcessCandidates();
1574 } 2108 }
1575 2109
1576 // Clean up dead objects from the runtime profiler. 2110 // Clean up dead objects from the runtime profiler.
1577 heap()->isolate()->runtime_profiler()->RemoveDeadSamples(); 2111 heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
1578 } 2112 }
1579 2113
1580 2114
1581 void MarkCompactCollector::ProcessMapCaches() { 2115 void MarkCompactCollector::ProcessMapCaches() {
1582 Object* raw_context = heap()->global_contexts_list_; 2116 Object* raw_context = heap()->global_contexts_list_;
1583 while (raw_context != heap()->undefined_value()) { 2117 while (raw_context != heap()->undefined_value()) {
1584 Context* context = reinterpret_cast<Context*>(raw_context); 2118 Context* context = reinterpret_cast<Context*>(raw_context);
1585 if (context->IsMarked()) { 2119 if (IsMarked(context)) {
1586 HeapObject* raw_map_cache = 2120 HeapObject* raw_map_cache =
1587 HeapObject::cast(context->get(Context::MAP_CACHE_INDEX)); 2121 HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
1588 // A map cache may be reachable from the stack. In this case 2122 // A map cache may be reachable from the stack. In this case
1589 // it's already transitively marked and it's too late to clean 2123 // it's already transitively marked and it's too late to clean
1590 // up its parts. 2124 // up its parts.
1591 if (!raw_map_cache->IsMarked() && 2125 if (!IsMarked(raw_map_cache) &&
1592 raw_map_cache != heap()->undefined_value()) { 2126 raw_map_cache != heap()->undefined_value()) {
1593 MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache); 2127 MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
1594 int existing_elements = map_cache->NumberOfElements(); 2128 int existing_elements = map_cache->NumberOfElements();
1595 int used_elements = 0; 2129 int used_elements = 0;
1596 for (int i = MapCache::kElementsStartIndex; 2130 for (int i = MapCache::kElementsStartIndex;
1597 i < map_cache->length(); 2131 i < map_cache->length();
1598 i += MapCache::kEntrySize) { 2132 i += MapCache::kEntrySize) {
1599 Object* raw_key = map_cache->get(i); 2133 Object* raw_key = map_cache->get(i);
1600 if (raw_key == heap()->undefined_value() || 2134 if (raw_key == heap()->undefined_value() ||
1601 raw_key == heap()->null_value()) continue; 2135 raw_key == heap()->null_value()) continue;
1602 STATIC_ASSERT(MapCache::kEntrySize == 2); 2136 STATIC_ASSERT(MapCache::kEntrySize == 2);
1603 Object* raw_map = map_cache->get(i + 1); 2137 Object* raw_map = map_cache->get(i + 1);
1604 if (raw_map->IsHeapObject() && 2138 if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
1605 HeapObject::cast(raw_map)->IsMarked()) {
1606 ++used_elements; 2139 ++used_elements;
1607 } else { 2140 } else {
1608 // Delete useless entries with unmarked maps. 2141 // Delete useless entries with unmarked maps.
1609 ASSERT(raw_map->IsMap()); 2142 ASSERT(raw_map->IsMap());
1610 map_cache->set_null_unchecked(heap(), i); 2143 map_cache->set_null_unchecked(heap(), i);
1611 map_cache->set_null_unchecked(heap(), i + 1); 2144 map_cache->set_null_unchecked(heap(), i + 1);
1612 } 2145 }
1613 } 2146 }
1614 if (used_elements == 0) { 2147 if (used_elements == 0) {
1615 context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value()); 2148 context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
1616 } else { 2149 } else {
1617 // Note: we don't actually shrink the cache here to avoid 2150 // Note: we don't actually shrink the cache here to avoid
1618 // extra complexity during GC. We rely on subsequent cache 2151 // extra complexity during GC. We rely on subsequent cache
1619 // usages (EnsureCapacity) to do this. 2152 // usages (EnsureCapacity) to do this.
1620 map_cache->ElementsRemoved(existing_elements - used_elements); 2153 map_cache->ElementsRemoved(existing_elements - used_elements);
1621 MarkObject(map_cache); 2154 MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
2155 MarkObject(map_cache, map_cache_markbit);
1622 } 2156 }
1623 } 2157 }
1624 } 2158 }
1625 // Move to next element in the list. 2159 // Move to next element in the list.
1626 raw_context = context->get(Context::NEXT_CONTEXT_LINK); 2160 raw_context = context->get(Context::NEXT_CONTEXT_LINK);
1627 } 2161 }
1628 ProcessMarkingStack(); 2162 ProcessMarkingDeque();
1629 } 2163 }
1630 2164
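// A minimal standalone sketch of the pruning pattern ProcessMapCaches uses
// above: walk a flattened cache of (key, value) pairs with a stride of two,
// null out entries whose value is no longer marked, and count the survivors
// so the caller can drop the whole cache when nothing is left. The Value
// type and its `marked` flag are hypothetical stand-ins for the real heap
// objects and mark bits; none of this is V8 API.
#include <cstddef>
#include <vector>

struct Value { bool marked; };

size_t PruneCache(std::vector<Value*>& cache, Value* undefined) {
  size_t used = 0;
  for (size_t i = 0; i + 1 < cache.size(); i += 2) {  // entry size is 2
    Value* key = cache[i];
    Value* value = cache[i + 1];
    if (key == undefined) continue;                   // already empty slot
    if (value->marked) {
      ++used;                                         // keep the live entry
    } else {
      cache[i] = undefined;                           // drop the dead entry
      cache[i + 1] = undefined;
    }
  }
  return used;  // caller replaces the cache with undefined when this is 0
}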
1631 2165
1632 #ifdef DEBUG 2166 #ifdef DEBUG
1633 void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) { 2167 void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
1634 live_bytes_ += obj->Size(); 2168 live_bytes_ += obj->Size();
1635 if (heap()->new_space()->Contains(obj)) { 2169 if (heap()->new_space()->Contains(obj)) {
1636 live_young_objects_size_ += obj->Size(); 2170 live_young_objects_size_ += obj->Size();
1637 } else if (heap()->map_space()->Contains(obj)) { 2171 } else if (heap()->map_space()->Contains(obj)) {
1638 ASSERT(obj->IsMap()); 2172 ASSERT(obj->IsMap());
1639 live_map_objects_size_ += obj->Size(); 2173 live_map_objects_size_ += obj->Size();
1640 } else if (heap()->cell_space()->Contains(obj)) { 2174 } else if (heap()->cell_space()->Contains(obj)) {
1641 ASSERT(obj->IsJSGlobalPropertyCell()); 2175 ASSERT(obj->IsJSGlobalPropertyCell());
1642 live_cell_objects_size_ += obj->Size(); 2176 live_cell_objects_size_ += obj->Size();
1643 } else if (heap()->old_pointer_space()->Contains(obj)) { 2177 } else if (heap()->old_pointer_space()->Contains(obj)) {
1644 live_old_pointer_objects_size_ += obj->Size(); 2178 live_old_pointer_objects_size_ += obj->Size();
1645 } else if (heap()->old_data_space()->Contains(obj)) { 2179 } else if (heap()->old_data_space()->Contains(obj)) {
1646 live_old_data_objects_size_ += obj->Size(); 2180 live_old_data_objects_size_ += obj->Size();
1647 } else if (heap()->code_space()->Contains(obj)) { 2181 } else if (heap()->code_space()->Contains(obj)) {
1648 live_code_objects_size_ += obj->Size(); 2182 live_code_objects_size_ += obj->Size();
1649 } else if (heap()->lo_space()->Contains(obj)) { 2183 } else if (heap()->lo_space()->Contains(obj)) {
1650 live_lo_objects_size_ += obj->Size(); 2184 live_lo_objects_size_ += obj->Size();
1651 } else { 2185 } else {
1652 UNREACHABLE(); 2186 UNREACHABLE();
1653 } 2187 }
1654 } 2188 }
1655 #endif // DEBUG 2189 #endif // DEBUG
1656 2190
1657 2191
1658 void MarkCompactCollector::SweepLargeObjectSpace() { 2192 void MarkCompactCollector::ReattachInitialMaps() {
1659 #ifdef DEBUG 2193 HeapObjectIterator map_iterator(heap()->map_space());
1660 ASSERT(state_ == MARK_LIVE_OBJECTS); 2194 for (HeapObject* obj = map_iterator.Next();
1661 state_ = 2195 obj != NULL;
1662 compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES; 2196 obj = map_iterator.Next()) {
1663 #endif 2197 if (obj->IsFreeSpace()) continue;
1664 // Deallocate unmarked objects and clear marked bits for marked objects. 2198 Map* map = Map::cast(obj);
1665 heap()->lo_space()->FreeUnmarkedObjects();
1666 }
1667 2199
2200 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2201 if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
1668 2202
1669 // Safe to use during marking phase only. 2203 if (map->attached_to_shared_function_info()) {
1670 bool MarkCompactCollector::SafeIsMap(HeapObject* object) { 2204 JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
1671 MapWord metamap = object->map_word(); 2205 }
1672 metamap.ClearMark(); 2206 }
1673 return metamap.ToMap()->instance_type() == MAP_TYPE;
1674 } 2207 }
1675 2208
1676 2209
1677 void MarkCompactCollector::ClearNonLiveTransitions() { 2210 void MarkCompactCollector::ClearNonLiveTransitions() {
1678 HeapObjectIterator map_iterator(heap()->map_space(), &SizeOfMarkedObject); 2211 HeapObjectIterator map_iterator(heap()->map_space());
1679 // Iterate over the map space, setting map transitions that go from 2212 // Iterate over the map space, setting map transitions that go from
1680 // a marked map to an unmarked map to null transitions. At the same time, 2213 // a marked map to an unmarked map to null transitions. At the same time,
1681 // set all the prototype fields of maps back to their original value, 2214 // set all the prototype fields of maps back to their original value,
1682 // dropping the back pointers temporarily stored in the prototype field. 2215 // dropping the back pointers temporarily stored in the prototype field.
1683 // Setting the prototype field requires following the linked list of 2216 // Setting the prototype field requires following the linked list of
1684 // back pointers, reversing them all at once. This allows us to find 2217 // back pointers, reversing them all at once. This allows us to find
1685 // those maps with map transitions that need to be nulled, and only 2218 // those maps with map transitions that need to be nulled, and only
1686 // scan the descriptor arrays of those maps, not all maps. 2219 // scan the descriptor arrays of those maps, not all maps.
1687 // All of these actions are carried out only on maps of JSObjects 2220 // All of these actions are carried out only on maps of JSObjects
1688 // and related subtypes. 2221 // and related subtypes.
1689 for (HeapObject* obj = map_iterator.next(); 2222 for (HeapObject* obj = map_iterator.Next();
1690 obj != NULL; obj = map_iterator.next()) { 2223 obj != NULL; obj = map_iterator.Next()) {
1691 Map* map = reinterpret_cast<Map*>(obj); 2224 Map* map = reinterpret_cast<Map*>(obj);
1692 if (!map->IsMarked() && map->IsByteArray()) continue; 2225 MarkBit map_mark = Marking::MarkBitFrom(map);
2226 if (map->IsFreeSpace()) continue;
1693 2227
1694 ASSERT(SafeIsMap(map)); 2228 ASSERT(map->IsMap());
1695 // Only JSObject and subtypes have map transitions and back pointers. 2229 // Only JSObject and subtypes have map transitions and back pointers.
1696 STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); 2230 STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
1697 if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue; 2231 if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
1698 2232
1699 if (map->IsMarked() && map->attached_to_shared_function_info()) { 2233 if (map_mark.Get() &&
2234 map->attached_to_shared_function_info()) {
1700 // This map is used for inobject slack tracking and has been detached 2235 // This map is used for inobject slack tracking and has been detached
1701 // from SharedFunctionInfo during the mark phase. 2236 // from SharedFunctionInfo during the mark phase.
1702 // Since it survived the GC, reattach it now. 2237 // Since it survived the GC, reattach it now.
1703 map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map); 2238 map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
1704 } 2239 }
1705 2240
1706 // Clear dead prototype transitions. 2241 // Clear dead prototype transitions.
1707 int number_of_transitions = map->NumberOfProtoTransitions(); 2242 int number_of_transitions = map->NumberOfProtoTransitions();
1708 if (number_of_transitions > 0) { 2243 FixedArray* prototype_transitions = map->prototype_transitions();
1709 FixedArray* prototype_transitions = 2244
1710 map->unchecked_prototype_transitions(); 2245 int new_number_of_transitions = 0;
1711 int new_number_of_transitions = 0; 2246 const int header = Map::kProtoTransitionHeaderSize;
1712 const int header = Map::kProtoTransitionHeaderSize; 2247 const int proto_offset =
1713 const int proto_offset = 2248 header + Map::kProtoTransitionPrototypeOffset;
1714 header + Map::kProtoTransitionPrototypeOffset; 2249 const int map_offset = header + Map::kProtoTransitionMapOffset;
1715 const int map_offset = header + Map::kProtoTransitionMapOffset; 2250 const int step = Map::kProtoTransitionElementsPerEntry;
1716 const int step = Map::kProtoTransitionElementsPerEntry; 2251 for (int i = 0; i < number_of_transitions; i++) {
1717 for (int i = 0; i < number_of_transitions; i++) { 2252 Object* prototype = prototype_transitions->get(proto_offset + i * step);
1718 Object* prototype = prototype_transitions->get(proto_offset + i * step); 2253 Object* cached_map = prototype_transitions->get(map_offset + i * step);
1719 Object* cached_map = prototype_transitions->get(map_offset + i * step); 2254 if (IsMarked(prototype) && IsMarked(cached_map)) {
1720 if (HeapObject::cast(prototype)->IsMarked() && 2255 if (new_number_of_transitions != i) {
1721 HeapObject::cast(cached_map)->IsMarked()) { 2256 prototype_transitions->set_unchecked(
1722 if (new_number_of_transitions != i) { 2257 heap_,
1723 prototype_transitions->set_unchecked( 2258 proto_offset + new_number_of_transitions * step,
1724 heap_, 2259 prototype,
1725 proto_offset + new_number_of_transitions * step, 2260 UPDATE_WRITE_BARRIER);
1726 prototype, 2261 prototype_transitions->set_unchecked(
1727 UPDATE_WRITE_BARRIER); 2262 heap_,
1728 prototype_transitions->set_unchecked( 2263 map_offset + new_number_of_transitions * step,
1729 heap_, 2264 cached_map,
1730 map_offset + new_number_of_transitions * step, 2265 SKIP_WRITE_BARRIER);
1731 cached_map,
1732 SKIP_WRITE_BARRIER);
1733 }
1734 new_number_of_transitions++;
1735 } 2266 }
1736 } 2267 }
1737 2268
1738 // Fill slots that became free with undefined value. 2269 // Fill slots that became free with undefined value.
1739 Object* undefined = heap()->raw_unchecked_undefined_value(); 2270 Object* undefined = heap()->undefined_value();
1740 for (int i = new_number_of_transitions * step; 2271 for (int i = new_number_of_transitions * step;
1741 i < number_of_transitions * step; 2272 i < number_of_transitions * step;
1742 i++) { 2273 i++) {
2274 // The undefined object is on a page that is never compacted and never
2275 // in new space so it is OK to skip the write barrier. Also it's a
2276 // root.
1743 prototype_transitions->set_unchecked(heap_, 2277 prototype_transitions->set_unchecked(heap_,
1744 header + i, 2278 header + i,
1745 undefined, 2279 undefined,
1746 SKIP_WRITE_BARRIER); 2280 SKIP_WRITE_BARRIER);
2281
2282 Object** undefined_slot =
2283 prototype_transitions->data_start() + i;
2284 RecordSlot(undefined_slot, undefined_slot, undefined);
1747 } 2285 }
1748 map->SetNumberOfProtoTransitions(new_number_of_transitions); 2286 map->SetNumberOfProtoTransitions(new_number_of_transitions);
1749 } 2287 }
1750 2288
1751 // Follow the chain of back pointers to find the prototype. 2289 // Follow the chain of back pointers to find the prototype.
1752 Map* current = map; 2290 Map* current = map;
1753 while (SafeIsMap(current)) { 2291 while (current->IsMap()) {
1754 current = reinterpret_cast<Map*>(current->prototype()); 2292 current = reinterpret_cast<Map*>(current->prototype());
1755 ASSERT(current->IsHeapObject()); 2293 ASSERT(current->IsHeapObject());
1756 } 2294 }
1757 Object* real_prototype = current; 2295 Object* real_prototype = current;
1758 2296
1759 // Follow back pointers, setting them to prototype, 2297 // Follow back pointers, setting them to prototype,
1760 // clearing map transitions when necessary. 2298 // clearing map transitions when necessary.
1761 current = map; 2299 current = map;
1762 bool on_dead_path = !current->IsMarked(); 2300 bool on_dead_path = !map_mark.Get();
1763 Object* next; 2301 Object* next;
1764 while (SafeIsMap(current)) { 2302 while (current->IsMap()) {
1765 next = current->prototype(); 2303 next = current->prototype();
1766 // There should never be a dead map above a live map. 2304 // There should never be a dead map above a live map.
1767 ASSERT(on_dead_path || current->IsMarked()); 2305 MarkBit current_mark = Marking::MarkBitFrom(current);
2306 bool is_alive = current_mark.Get();
2307 ASSERT(on_dead_path || is_alive);
1768 2308
1769 // A live map above a dead map indicates a dead transition. 2309 // A live map above a dead map indicates a dead transition.
1770 // This test will always be false on the first iteration. 2310 // This test will always be false on the first iteration.
1771 if (on_dead_path && current->IsMarked()) { 2311 if (on_dead_path && is_alive) {
1772 on_dead_path = false; 2312 on_dead_path = false;
1773 current->ClearNonLiveTransitions(heap(), real_prototype); 2313 current->ClearNonLiveTransitions(heap(), real_prototype);
1774 } 2314 }
1775 *HeapObject::RawField(current, Map::kPrototypeOffset) = 2315 *HeapObject::RawField(current, Map::kPrototypeOffset) =
1776 real_prototype; 2316 real_prototype;
2317
2318 if (is_alive) {
2319 Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset);
2320 RecordSlot(slot, slot, real_prototype);
2321 }
1777 current = reinterpret_cast<Map*>(next); 2322 current = reinterpret_cast<Map*>(next);
1778 } 2323 }
1779 } 2324 }
1780 } 2325 }
1781 2326
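// The dead-prototype-transition removal above is, at its core, an in-place
// compaction of an array of (prototype, cached map) pairs: live pairs slide
// toward the front and the freed tail is overwritten with a filler value.
// A standalone sketch of just that compaction, with a hypothetical Obj type
// carrying a `marked` flag; the write barriers and slot recording done by
// the real code are deliberately left out.
#include <cstddef>
#include <vector>

struct Obj { bool marked; };

size_t CompactTransitions(std::vector<Obj*>& pairs, Obj* filler) {
  const size_t step = 2;                 // prototype slot + map slot
  size_t kept = 0;
  for (size_t i = 0; i + 1 < pairs.size(); i += step) {
    Obj* prototype = pairs[i];
    Obj* cached_map = pairs[i + 1];
    if (prototype->marked && cached_map->marked) {
      if (kept * step != i) {            // slide the live pair forward
        pairs[kept * step] = prototype;
        pairs[kept * step + 1] = cached_map;
      }
      ++kept;
    }
  }
  for (size_t i = kept * step; i < pairs.size(); ++i) {
    pairs[i] = filler;                   // fill the freed tail slots
  }
  return kept;                           // new number of transitions
}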
1782 2327
1783 void MarkCompactCollector::ProcessWeakMaps() { 2328 void MarkCompactCollector::ProcessWeakMaps() {
1784 Object* weak_map_obj = encountered_weak_maps(); 2329 Object* weak_map_obj = encountered_weak_maps();
1785 while (weak_map_obj != Smi::FromInt(0)) { 2330 while (weak_map_obj != Smi::FromInt(0)) {
1786 ASSERT(HeapObject::cast(weak_map_obj)->IsMarked()); 2331 ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
1787 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj); 2332 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
1788 ObjectHashTable* table = weak_map->unchecked_table(); 2333 ObjectHashTable* table = weak_map->unchecked_table();
1789 for (int i = 0; i < table->Capacity(); i++) { 2334 for (int i = 0; i < table->Capacity(); i++) {
1790 if (HeapObject::cast(table->KeyAt(i))->IsMarked()) { 2335 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
1791 Object* value = table->get(table->EntryToValueIndex(i)); 2336 Object* value = table->get(table->EntryToValueIndex(i));
1792 StaticMarkingVisitor::MarkObjectByPointer(heap(), &value); 2337 StaticMarkingVisitor::VisitPointer(heap(), &value);
1793 table->set_unchecked(heap(), 2338 table->set_unchecked(heap(),
1794 table->EntryToValueIndex(i), 2339 table->EntryToValueIndex(i),
1795 value, 2340 value,
1796 UPDATE_WRITE_BARRIER); 2341 UPDATE_WRITE_BARRIER);
1797 } 2342 }
1798 } 2343 }
1799 weak_map_obj = weak_map->next(); 2344 weak_map_obj = weak_map->next();
1800 } 2345 }
1801 } 2346 }
1802 2347
1803 2348
1804 void MarkCompactCollector::ClearWeakMaps() { 2349 void MarkCompactCollector::ClearWeakMaps() {
1805 Object* weak_map_obj = encountered_weak_maps(); 2350 Object* weak_map_obj = encountered_weak_maps();
1806 while (weak_map_obj != Smi::FromInt(0)) { 2351 while (weak_map_obj != Smi::FromInt(0)) {
1807 ASSERT(HeapObject::cast(weak_map_obj)->IsMarked()); 2352 ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
1808 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj); 2353 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
1809 ObjectHashTable* table = weak_map->unchecked_table(); 2354 ObjectHashTable* table = weak_map->unchecked_table();
1810 for (int i = 0; i < table->Capacity(); i++) { 2355 for (int i = 0; i < table->Capacity(); i++) {
1811 if (!HeapObject::cast(table->KeyAt(i))->IsMarked()) { 2356 if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
1812 table->RemoveEntry(i, heap()); 2357 table->RemoveEntry(i, heap());
1813 } 2358 }
1814 } 2359 }
1815 weak_map_obj = weak_map->next(); 2360 weak_map_obj = weak_map->next();
1816 weak_map->set_next(Smi::FromInt(0)); 2361 weak_map->set_next(Smi::FromInt(0));
1817 } 2362 }
1818 set_encountered_weak_maps(Smi::FromInt(0)); 2363 set_encountered_weak_maps(Smi::FromInt(0));
1819 } 2364 }
1820 2365
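// ProcessWeakMaps and ClearWeakMaps together implement the usual weak-map
// rule: a value stays alive only while its key is reachable, and entries
// with dead keys are dropped once marking has finished. A standalone sketch
// of that rule over a toy table; the `marked` flag is a hypothetical stand-in
// for the real mark bits, and unlike the real code this sketch does not feed
// newly marked values back into the marking deque for transitive marking.
#include <vector>

struct HeapObj { bool marked; };

struct WeakEntry {
  HeapObj* key;
  HeapObj* value;
  bool present;
};

void ProcessAndClearWeakEntries(std::vector<WeakEntry>& table) {
  for (WeakEntry& e : table) {
    if (!e.present) continue;
    if (e.key->marked) {
      e.value->marked = true;   // value reachable through a live key
    } else {
      e.present = false;        // dead key: drop the entry entirely
    }
  }
}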
1821 // -------------------------------------------------------------------------
1822 // Phase 2: Encode forwarding addresses.
1823 // When compacting, forwarding addresses for objects in old space and map
1824 // space are encoded in their map pointer word (along with an encoding of
1825 // their map pointers).
1826 //
 1827 // The exact encoding is described in the comments for class MapWord in
1828 // objects.h.
1829 //
1830 // An address range [start, end) can have both live and non-live objects.
1831 // Maximal non-live regions are marked so they can be skipped on subsequent
1832 // sweeps of the heap. A distinguished map-pointer encoding is used to mark
1833 // free regions of one-word size (in which case the next word is the start
1834 // of a live object). A second distinguished map-pointer encoding is used
1835 // to mark free regions larger than one word, and the size of the free
1836 // region (including the first word) is written to the second word of the
1837 // region.
1838 //
1839 // Any valid map page offset must lie in the object area of the page, so map
1840 // page offsets less than Page::kObjectStartOffset are invalid. We use a
1841 // pair of distinguished invalid map encodings (for single word and multiple
1842 // words) to indicate free regions in the page found during computation of
1843 // forwarding addresses and skipped over in subsequent sweeps.
1844
1845
1846 // Encode a free region, defined by the given start address and size, in the
1847 // first word or two of the region.
1848 void EncodeFreeRegion(Address free_start, int free_size) {
1849 ASSERT(free_size >= kIntSize);
1850 if (free_size == kIntSize) {
1851 Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
1852 } else {
1853 ASSERT(free_size >= 2 * kIntSize);
1854 Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
1855 Memory::int_at(free_start + kIntSize) = free_size;
1856 }
1857
1858 #ifdef DEBUG
1859 // Zap the body of the free region.
1860 if (FLAG_enable_slow_asserts) {
1861 for (int offset = 2 * kIntSize;
1862 offset < free_size;
1863 offset += kPointerSize) {
1864 Memory::Address_at(free_start + offset) = kZapValue;
1865 }
1866 }
1867 #endif
1868 }
1869
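// EncodeFreeRegion depends on two sentinel values that can never be valid
// map encodings: one marks a single-word hole, the other marks a larger hole
// whose byte size is stored in the following word. A standalone sketch of
// writing and reading such an encoding over a raw byte buffer; the sentinel
// constants here are made up, the real ones are kSingleFreeEncoding and
// kMultiFreeEncoding used above.
#include <cassert>
#include <cstdint>
#include <cstring>

const uint32_t kSingleHole = 0xFFFFFFFDu;  // hypothetical sentinel values
const uint32_t kMultiHole = 0xFFFFFFFEu;

void WriteHole(uint8_t* start, uint32_t size_in_bytes) {
  assert(size_in_bytes >= sizeof(uint32_t));
  if (size_in_bytes == sizeof(uint32_t)) {
    std::memcpy(start, &kSingleHole, sizeof(uint32_t));       // one-word hole
  } else {
    std::memcpy(start, &kMultiHole, sizeof(uint32_t));        // larger hole...
    std::memcpy(start + sizeof(uint32_t), &size_in_bytes,
                sizeof(uint32_t));                            // ...plus size
  }
}

// Returns how many bytes a sweep may skip at `start`, or 0 if it is live.
uint32_t HoleSize(const uint8_t* start) {
  uint32_t word;
  std::memcpy(&word, start, sizeof(uint32_t));
  if (word == kSingleHole) return sizeof(uint32_t);
  if (word != kMultiHole) return 0;
  uint32_t size;
  std::memcpy(&size, start + sizeof(uint32_t), sizeof(uint32_t));
  return size;
}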
1870
1871 // Try to promote all objects in new space. Heap numbers and sequential
 1872 // strings are promoted to the old data space, large objects to large object space,
1873 // and all others to the old space.
1874 inline MaybeObject* MCAllocateFromNewSpace(Heap* heap,
1875 HeapObject* object,
1876 int object_size) {
1877 MaybeObject* forwarded;
1878 if (object_size > heap->MaxObjectSizeInPagedSpace()) {
1879 forwarded = Failure::Exception();
1880 } else {
1881 OldSpace* target_space = heap->TargetSpace(object);
1882 ASSERT(target_space == heap->old_pointer_space() ||
1883 target_space == heap->old_data_space());
1884 forwarded = target_space->MCAllocateRaw(object_size);
1885 }
1886 Object* result;
1887 if (!forwarded->ToObject(&result)) {
1888 result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
1889 }
1890 return result;
1891 }
1892
1893
1894 // Allocation functions for the paged spaces call the space's MCAllocateRaw.
1895 MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace(
1896 Heap *heap,
1897 HeapObject* ignore,
1898 int object_size) {
1899 return heap->old_pointer_space()->MCAllocateRaw(object_size);
1900 }
1901
1902
1903 MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace(
1904 Heap* heap,
1905 HeapObject* ignore,
1906 int object_size) {
1907 return heap->old_data_space()->MCAllocateRaw(object_size);
1908 }
1909
1910
1911 MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace(
1912 Heap* heap,
1913 HeapObject* ignore,
1914 int object_size) {
1915 return heap->code_space()->MCAllocateRaw(object_size);
1916 }
1917
1918
1919 MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace(
1920 Heap* heap,
1921 HeapObject* ignore,
1922 int object_size) {
1923 return heap->map_space()->MCAllocateRaw(object_size);
1924 }
1925
1926
1927 MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(
1928 Heap* heap, HeapObject* ignore, int object_size) {
1929 return heap->cell_space()->MCAllocateRaw(object_size);
1930 }
1931
1932
1933 // The forwarding address is encoded at the same offset as the current
1934 // to-space object, but in from space.
1935 inline void EncodeForwardingAddressInNewSpace(Heap* heap,
1936 HeapObject* old_object,
1937 int object_size,
1938 Object* new_object,
1939 int* ignored) {
1940 int offset =
1941 heap->new_space()->ToSpaceOffsetForAddress(old_object->address());
1942 Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) =
1943 HeapObject::cast(new_object)->address();
1944 }
1945
1946
1947 // The forwarding address is encoded in the map pointer of the object as an
1948 // offset (in terms of live bytes) from the address of the first live object
1949 // in the page.
1950 inline void EncodeForwardingAddressInPagedSpace(Heap* heap,
1951 HeapObject* old_object,
1952 int object_size,
1953 Object* new_object,
1954 int* offset) {
1955 // Record the forwarding address of the first live object if necessary.
1956 if (*offset == 0) {
1957 Page::FromAddress(old_object->address())->mc_first_forwarded =
1958 HeapObject::cast(new_object)->address();
1959 }
1960
1961 MapWord encoding =
1962 MapWord::EncodeAddress(old_object->map()->address(), *offset);
1963 old_object->set_map_word(encoding);
1964 *offset += object_size;
1965 ASSERT(*offset <= Page::kObjectAreaSize);
1966 }
1967
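// The paged-space encoding above packs two things into the object's map
// word: an encoding of its map's address and the live-byte offset of the
// object from the first live object on its page (recorded in
// mc_first_forwarded while *offset was still 0). A standalone sketch of such
// a packing with a made-up split of a 32-bit word into a map index and a low
// offset field; the real layout is defined by MapWord in objects.h.
#include <cassert>
#include <cstdint>

const uint32_t kOffsetBits = 20;                    // hypothetical split
const uint32_t kOffsetMask = (1u << kOffsetBits) - 1;

uint32_t EncodeMapAndOffset(uint32_t map_index, uint32_t live_offset) {
  assert(live_offset <= kOffsetMask);
  return (map_index << kOffsetBits) | live_offset;  // both in one word
}

uint32_t DecodeMapIndex(uint32_t word) { return word >> kOffsetBits; }
uint32_t DecodeLiveOffset(uint32_t word) { return word & kOffsetMask; }

// The forwarding address is then mc_first_forwarded + DecodeLiveOffset(word).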
1968
1969 // Most non-live objects are ignored.
1970 inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {}
1971
1972
 1973 // Function template that, given a range of addresses (e.g., a semispace or a
1974 // paged space page), iterates through the objects in the range to clear
1975 // mark bits and compute and encode forwarding addresses. As a side effect,
1976 // maximal free chunks are marked so that they can be skipped on subsequent
1977 // sweeps.
1978 //
1979 // The template parameters are an allocation function, a forwarding address
1980 // encoding function, and a function to process non-live objects.
1981 template<MarkCompactCollector::AllocationFunction Alloc,
1982 MarkCompactCollector::EncodingFunction Encode,
1983 MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
1984 inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector,
1985 Address start,
1986 Address end,
1987 int* offset) {
1988 // The start address of the current free region while sweeping the space.
1989 // This address is set when a transition from live to non-live objects is
1990 // encountered. A value (an encoding of the 'next free region' pointer)
1991 // is written to memory at this address when a transition from non-live to
1992 // live objects is encountered.
1993 Address free_start = NULL;
1994
1995 // A flag giving the state of the previously swept object. Initially true
1996 // to ensure that free_start is initialized to a proper address before
1997 // trying to write to it.
1998 bool is_prev_alive = true;
1999
2000 int object_size; // Will be set on each iteration of the loop.
2001 for (Address current = start; current < end; current += object_size) {
2002 HeapObject* object = HeapObject::FromAddress(current);
2003 if (object->IsMarked()) {
2004 object->ClearMark();
2005 collector->tracer()->decrement_marked_count();
2006 object_size = object->Size();
2007
2008 Object* forwarded =
2009 Alloc(collector->heap(), object, object_size)->ToObjectUnchecked();
2010 Encode(collector->heap(), object, object_size, forwarded, offset);
2011
2012 #ifdef DEBUG
2013 if (FLAG_gc_verbose) {
2014 PrintF("forward %p -> %p.\n", object->address(),
2015 HeapObject::cast(forwarded)->address());
2016 }
2017 #endif
2018 if (!is_prev_alive) { // Transition from non-live to live.
2019 EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
2020 is_prev_alive = true;
2021 }
2022 } else { // Non-live object.
2023 object_size = object->Size();
2024 ProcessNonLive(object, collector->heap()->isolate());
2025 if (is_prev_alive) { // Transition from live to non-live.
2026 free_start = current;
2027 is_prev_alive = false;
2028 }
2029 LiveObjectList::ProcessNonLive(object);
2030 }
2031 }
2032
2033 // If we ended on a free region, mark it.
2034 if (!is_prev_alive) {
2035 EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
2036 }
2037 }
2038
2039
2040 // Functions to encode the forwarding pointers in each compactable space.
2041 void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
2042 int ignored;
2043 EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
2044 EncodeForwardingAddressInNewSpace,
2045 IgnoreNonLiveObject>(
2046 this,
2047 heap()->new_space()->bottom(),
2048 heap()->new_space()->top(),
2049 &ignored);
2050 }
2051
2052
2053 template<MarkCompactCollector::AllocationFunction Alloc,
2054 MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
2055 void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
2056 PagedSpace* space) {
2057 PageIterator it(space, PageIterator::PAGES_IN_USE);
2058 while (it.has_next()) {
2059 Page* p = it.next();
2060
2061 // The offset of each live object in the page from the first live object
2062 // in the page.
2063 int offset = 0;
2064 EncodeForwardingAddressesInRange<Alloc,
2065 EncodeForwardingAddressInPagedSpace,
2066 ProcessNonLive>(
2067 this,
2068 p->ObjectAreaStart(),
2069 p->AllocationTop(),
2070 &offset);
2071 }
2072 }
2073
2074 2366
 2075 // We scavenge new space simultaneously with sweeping. This is done in two 2367 // We scavenge new space simultaneously with sweeping. This is done in two
2076 // passes. 2368 // passes.
2369 //
2077 // The first pass migrates all alive objects from one semispace to another or 2370 // The first pass migrates all alive objects from one semispace to another or
 2078 // promotes them to old space. Forwarding address is written directly into 2371 // promotes them to old space. Forwarding address is written directly into
2079 // first word of object without any encoding. If object is dead we are writing 2372 // first word of object without any encoding. If object is dead we write
2080 // NULL as a forwarding address. 2373 // NULL as a forwarding address.
2081 // The second pass updates pointers to new space in all spaces. It is possible 2374 //
2082 // to encounter pointers to dead objects during traversal of dirty regions we 2375 // The second pass updates pointers to new space in all spaces. It is possible
2083 // should clear them to avoid encountering them during next dirty regions 2376 // to encounter pointers to dead new space objects during traversal of pointers
2084 // iteration. 2377 // to new space. We should clear them to avoid encountering them during next
2085 static void MigrateObject(Heap* heap, 2378 // pointer iteration. This is an issue if the store buffer overflows and we
2086 Address dst, 2379 // have to scan the entire old space, including dead objects, looking for
2087 Address src, 2380 // pointers to new space.
2088 int size, 2381 void MarkCompactCollector::MigrateObject(Address dst,
2089 bool to_old_space) { 2382 Address src,
2090 if (to_old_space) { 2383 int size,
2091 heap->CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size); 2384 AllocationSpace dest) {
2385 HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
2386 if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
2387 Address src_slot = src;
2388 Address dst_slot = dst;
2389 ASSERT(IsAligned(size, kPointerSize));
2390
2391 for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2392 Object* value = Memory::Object_at(src_slot);
2393
2394 Memory::Object_at(dst_slot) = value;
2395
2396 if (heap_->InNewSpace(value)) {
2397 heap_->store_buffer()->Mark(dst_slot);
2398 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2399 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2400 &migration_slots_buffer_,
2401 reinterpret_cast<Object**>(dst_slot),
2402 SlotsBuffer::IGNORE_OVERFLOW);
2403 }
2404
2405 src_slot += kPointerSize;
2406 dst_slot += kPointerSize;
2407 }
2408
2409 if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
2410 Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
2411 Address code_entry = Memory::Address_at(code_entry_slot);
2412
2413 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2414 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2415 &migration_slots_buffer_,
2416 SlotsBuffer::CODE_ENTRY_SLOT,
2417 code_entry_slot,
2418 SlotsBuffer::IGNORE_OVERFLOW);
2419 }
2420 }
2421 } else if (dest == CODE_SPACE) {
2422 PROFILE(heap()->isolate(), CodeMoveEvent(src, dst));
2423 heap()->MoveBlock(dst, src, size);
2424 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2425 &migration_slots_buffer_,
2426 SlotsBuffer::RELOCATED_CODE_OBJECT,
2427 dst,
2428 SlotsBuffer::IGNORE_OVERFLOW);
2429 Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
2092 } else { 2430 } else {
2093 heap->CopyBlock(dst, src, size); 2431 ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2432 heap()->MoveBlock(dst, src, size);
2094 } 2433 }
2095
2096 Memory::Address_at(src) = dst; 2434 Memory::Address_at(src) = dst;
2097 } 2435 }
2098 2436
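// The new MigrateObject above copies the body word by word and records
// interesting destination slots as it goes: slots now holding new-space
// pointers go into the store buffer, slots pointing into evacuation
// candidates go into the migration slots buffer, and the forwarding address
// finally overwrites the source object's first word. A condensed standalone
// sketch of that shape; the predicate and recorder callbacks are hypothetical
// stand-ins for the store buffer, the slots buffer and the page checks, and
// the special handling of code objects and code entry slots is omitted.
#include <cstddef>
#include <cstdint>

typedef uintptr_t Word;
typedef bool (*SlotPredicate)(Word value);
typedef void (*SlotRecorder)(Word* slot);

void MigrateWords(Word* dst, Word* src, size_t size_in_words,
                  SlotPredicate is_new_space_pointer,
                  SlotRecorder record_in_store_buffer,
                  SlotPredicate is_candidate_pointer,
                  SlotRecorder record_migration_slot) {
  for (size_t i = 0; i < size_in_words; i++) {
    Word value = src[i];
    dst[i] = value;                                   // copy one slot
    if (is_new_space_pointer(value)) {
      record_in_store_buffer(&dst[i]);                // old->new pointer
    } else if (is_candidate_pointer(value)) {
      record_migration_slot(&dst[i]);                 // needs later fix-up
    }
  }
  src[0] = reinterpret_cast<Word>(dst);               // forwarding address
}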
2099 2437
2100 class StaticPointersToNewGenUpdatingVisitor : public
2101 StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
2102 public:
2103 static inline void VisitPointer(Heap* heap, Object** p) {
2104 if (!(*p)->IsHeapObject()) return;
2105
2106 HeapObject* obj = HeapObject::cast(*p);
2107 Address old_addr = obj->address();
2108
2109 if (heap->new_space()->Contains(obj)) {
2110 ASSERT(heap->InFromSpace(*p));
2111 *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
2112 }
2113 }
2114 };
2115
2116
2117 // Visitor for updating pointers from live objects in old spaces to new space. 2438 // Visitor for updating pointers from live objects in old spaces to new space.
2118 // It does not expect to encounter pointers to dead objects. 2439 // It does not expect to encounter pointers to dead objects.
2119 class PointersToNewGenUpdatingVisitor: public ObjectVisitor { 2440 class PointersUpdatingVisitor: public ObjectVisitor {
2120 public: 2441 public:
2121 explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { } 2442 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
2122 2443
2123 void VisitPointer(Object** p) { 2444 void VisitPointer(Object** p) {
2124 StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p); 2445 UpdatePointer(p);
2125 } 2446 }
2126 2447
2127 void VisitPointers(Object** start, Object** end) { 2448 void VisitPointers(Object** start, Object** end) {
2128 for (Object** p = start; p < end; p++) { 2449 for (Object** p = start; p < end; p++) UpdatePointer(p);
2129 StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p); 2450 }
2130 } 2451
2452 void VisitEmbeddedPointer(Code* host, Object** p) {
2453 UpdatePointer(p);
2131 } 2454 }
2132 2455
2133 void VisitCodeTarget(RelocInfo* rinfo) { 2456 void VisitCodeTarget(RelocInfo* rinfo) {
2134 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); 2457 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2135 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); 2458 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2136 VisitPointer(&target); 2459 VisitPointer(&target);
2137 rinfo->set_target_address(Code::cast(target)->instruction_start()); 2460 rinfo->set_target_address(Code::cast(target)->instruction_start());
2138 } 2461 }
2139 2462
2140 void VisitDebugTarget(RelocInfo* rinfo) { 2463 void VisitDebugTarget(RelocInfo* rinfo) {
2141 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && 2464 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2142 rinfo->IsPatchedReturnSequence()) || 2465 rinfo->IsPatchedReturnSequence()) ||
2143 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && 2466 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2144 rinfo->IsPatchedDebugBreakSlotSequence())); 2467 rinfo->IsPatchedDebugBreakSlotSequence()));
2145 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); 2468 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2146 VisitPointer(&target); 2469 VisitPointer(&target);
2147 rinfo->set_call_address(Code::cast(target)->instruction_start()); 2470 rinfo->set_call_address(Code::cast(target)->instruction_start());
2148 } 2471 }
2149 2472
2473 static inline void UpdateSlot(Heap* heap, Object** slot) {
2474 Object* obj = *slot;
2475
2476 if (!obj->IsHeapObject()) return;
2477
2478 HeapObject* heap_obj = HeapObject::cast(obj);
2479
2480 MapWord map_word = heap_obj->map_word();
2481 if (map_word.IsForwardingAddress()) {
2482 ASSERT(heap->InFromSpace(heap_obj) ||
2483 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
2484 HeapObject* target = map_word.ToForwardingAddress();
2485 *slot = target;
2486 ASSERT(!heap->InFromSpace(target) &&
2487 !MarkCompactCollector::IsOnEvacuationCandidate(target));
2488 }
2489 }
2490
2150 private: 2491 private:
2492 inline void UpdatePointer(Object** p) {
2493 UpdateSlot(heap_, p);
2494 }
2495
2151 Heap* heap_; 2496 Heap* heap_;
2152 }; 2497 };
2153 2498
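// PointersUpdatingVisitor::UpdateSlot above is the consumer of the forwarding
// addresses planted during evacuation: a slot is rewritten only if the object
// it points to actually moved. A tiny standalone sketch of that check, reusing
// the convention from the migration sketch earlier that a relocated object's
// first word holds its destination; `was_evacuated` is a hypothetical
// predicate standing in for the from-space / evacuation-candidate tests the
// real code performs via MapWord::IsForwardingAddress.
#include <cstdint>

typedef uintptr_t Word;
typedef bool (*WasEvacuated)(Word* object);

void UpdateSlotSketch(Word** slot, WasEvacuated was_evacuated) {
  Word* object = *slot;
  if (object == nullptr) return;               // nothing to do for cleared slots
  if (!was_evacuated(object)) return;          // the object did not move
  *slot = reinterpret_cast<Word*>(object[0]);  // redirect to the new copy
}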
2154 2499
2155 // Visitor for updating pointers from live objects in old spaces to new space. 2500 static void UpdatePointer(HeapObject** p, HeapObject* object) {
2156 // It can encounter pointers to dead objects in new space when traversing map 2501 ASSERT(*p == object);
2157 // space (see comment for MigrateObject).
2158 static void UpdatePointerToNewGen(HeapObject** p) {
2159 if (!(*p)->IsHeapObject()) return;
2160 2502
2161 Address old_addr = (*p)->address(); 2503 Address old_addr = object->address();
2162 ASSERT(HEAP->InFromSpace(*p));
2163 2504
2164 Address new_addr = Memory::Address_at(old_addr); 2505 Address new_addr = Memory::Address_at(old_addr);
2165 2506
2166 if (new_addr == NULL) { 2507 // The new space sweep will overwrite the map word of dead objects
2167 // We encountered pointer to a dead object. Clear it so we will 2508 // with NULL. In this case we do not need to transfer this entry to
2168 // not visit it again during next iteration of dirty regions. 2509 // the store buffer which we are rebuilding.
2169 *p = NULL; 2510 if (new_addr != NULL) {
2511 *p = HeapObject::FromAddress(new_addr);
2170 } else { 2512 } else {
2171 *p = HeapObject::FromAddress(new_addr); 2513 // We have to zap this pointer, because the store buffer may overflow later,
2514 // and then we have to scan the entire heap and we don't want to find
 2515 // spurious new space pointers in the old space.
2516 *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0));
2172 } 2517 }
2173 } 2518 }
2174 2519
2175 2520
2176 static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap, 2521 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2177 Object** p) { 2522 Object** p) {
2178 Address old_addr = HeapObject::cast(*p)->address(); 2523 MapWord map_word = HeapObject::cast(*p)->map_word();
2179 Address new_addr = Memory::Address_at(old_addr); 2524
2180 return String::cast(HeapObject::FromAddress(new_addr)); 2525 if (map_word.IsForwardingAddress()) {
2526 return String::cast(map_word.ToForwardingAddress());
2527 }
2528
2529 return String::cast(*p);
2181 } 2530 }
2182 2531
2183 2532
2184 static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) { 2533 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
2534 int object_size) {
2185 Object* result; 2535 Object* result;
2186 2536
2187 if (object_size > heap->MaxObjectSizeInPagedSpace()) { 2537 if (object_size > heap()->MaxObjectSizeInPagedSpace()) {
2188 MaybeObject* maybe_result = 2538 MaybeObject* maybe_result =
2189 heap->lo_space()->AllocateRawFixedArray(object_size); 2539 heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
2190 if (maybe_result->ToObject(&result)) { 2540 if (maybe_result->ToObject(&result)) {
2191 HeapObject* target = HeapObject::cast(result); 2541 HeapObject* target = HeapObject::cast(result);
2192 MigrateObject(heap, target->address(), object->address(), object_size, 2542 MigrateObject(target->address(),
2193 true); 2543 object->address(),
2194 heap->mark_compact_collector()->tracer()-> 2544 object_size,
2545 LO_SPACE);
2546 heap()->mark_compact_collector()->tracer()->
2195 increment_promoted_objects_size(object_size); 2547 increment_promoted_objects_size(object_size);
2196 return true; 2548 return true;
2197 } 2549 }
2198 } else { 2550 } else {
2199 OldSpace* target_space = heap->TargetSpace(object); 2551 OldSpace* target_space = heap()->TargetSpace(object);
2200 2552
2201 ASSERT(target_space == heap->old_pointer_space() || 2553 ASSERT(target_space == heap()->old_pointer_space() ||
2202 target_space == heap->old_data_space()); 2554 target_space == heap()->old_data_space());
2203 MaybeObject* maybe_result = target_space->AllocateRaw(object_size); 2555 MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
2204 if (maybe_result->ToObject(&result)) { 2556 if (maybe_result->ToObject(&result)) {
2205 HeapObject* target = HeapObject::cast(result); 2557 HeapObject* target = HeapObject::cast(result);
2206 MigrateObject(heap, 2558 MigrateObject(target->address(),
2207 target->address(),
2208 object->address(), 2559 object->address(),
2209 object_size, 2560 object_size,
2210 target_space == heap->old_pointer_space()); 2561 target_space->identity());
2211 heap->mark_compact_collector()->tracer()-> 2562 heap()->mark_compact_collector()->tracer()->
2212 increment_promoted_objects_size(object_size); 2563 increment_promoted_objects_size(object_size);
2213 return true; 2564 return true;
2214 } 2565 }
2215 } 2566 }
2216 2567
2217 return false; 2568 return false;
2218 } 2569 }
2219 2570
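// TryPromoteObject above picks a promotion target purely from the object's
// size and contents: anything bigger than the largest object a paged space
// accepts goes to the large object space, and everything else goes to the
// old pointer or old data space chosen by Heap::TargetSpace. A small sketch
// of that decision; the size limit and the `contains_pointers` flag are
// hypothetical simplifications of the real checks.
enum PromotionTarget { PROMOTE_OLD_POINTER, PROMOTE_OLD_DATA, PROMOTE_LARGE };

const int kMaxPagedObjectSize = 128 * 1024;  // stand-in for the real limit

PromotionTarget ChoosePromotionTarget(int object_size, bool contains_pointers) {
  if (object_size > kMaxPagedObjectSize) return PROMOTE_LARGE;
  // Data-only objects (heap numbers, sequential strings, ...) can skip the
  // old pointer space, since they never need to be scanned for pointers.
  return contains_pointers ? PROMOTE_OLD_POINTER : PROMOTE_OLD_DATA;
}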
2220 2571
2221 static void SweepNewSpace(Heap* heap, NewSpace* space) { 2572 void MarkCompactCollector::EvacuateNewSpace() {
2222 heap->CheckNewSpaceExpansionCriteria(); 2573 heap()->CheckNewSpaceExpansionCriteria();
2223 2574
2224 Address from_bottom = space->bottom(); 2575 NewSpace* new_space = heap()->new_space();
2225 Address from_top = space->top(); 2576
2577 // Store allocation range before flipping semispaces.
2578 Address from_bottom = new_space->bottom();
2579 Address from_top = new_space->top();
2226 2580
2227 // Flip the semispaces. After flipping, to space is empty, from space has 2581 // Flip the semispaces. After flipping, to space is empty, from space has
2228 // live objects. 2582 // live objects.
2229 space->Flip(); 2583 new_space->Flip();
2230 space->ResetAllocationInfo(); 2584 new_space->ResetAllocationInfo();
2231 2585
2232 int size = 0;
2233 int survivors_size = 0; 2586 int survivors_size = 0;
2234 2587
2235 // First pass: traverse all objects in inactive semispace, remove marks, 2588 // First pass: traverse all objects in inactive semispace, remove marks,
2236 // migrate live objects and write forwarding addresses. 2589 // migrate live objects and write forwarding addresses. This stage puts
2237 for (Address current = from_bottom; current < from_top; current += size) { 2590 // new entries in the store buffer and may cause some pages to be marked
2238 HeapObject* object = HeapObject::FromAddress(current); 2591 // scan-on-scavenge.
2239 2592 SemiSpaceIterator from_it(from_bottom, from_top);
2240 if (object->IsMarked()) { 2593 for (HeapObject* object = from_it.Next();
2241 object->ClearMark(); 2594 object != NULL;
2242 heap->mark_compact_collector()->tracer()->decrement_marked_count(); 2595 object = from_it.Next()) {
2243 2596 MarkBit mark_bit = Marking::MarkBitFrom(object);
2244 size = object->Size(); 2597 if (mark_bit.Get()) {
2598 mark_bit.Clear();
2599 // Don't bother decrementing live bytes count. We'll discard the
2600 // entire page at the end.
2601 int size = object->Size();
2245 survivors_size += size; 2602 survivors_size += size;
2246 2603
2247 // Aggressively promote young survivors to the old space. 2604 // Aggressively promote young survivors to the old space.
2248 if (TryPromoteObject(heap, object, size)) { 2605 if (TryPromoteObject(object, size)) {
2249 continue; 2606 continue;
2250 } 2607 }
2251 2608
2252 // Promotion failed. Just migrate object to another semispace. 2609 // Promotion failed. Just migrate object to another semispace.
2253 // Allocation cannot fail at this point: semispaces are of equal size. 2610 MaybeObject* allocation = new_space->AllocateRaw(size);
2254 Object* target = space->AllocateRaw(size)->ToObjectUnchecked(); 2611 if (allocation->IsFailure()) {
2255 2612 if (!new_space->AddFreshPage()) {
2256 MigrateObject(heap, 2613 // Shouldn't happen. We are sweeping linearly, and to-space
2257 HeapObject::cast(target)->address(), 2614 // has the same number of pages as from-space, so there is
2258 current, 2615 // always room.
2616 UNREACHABLE();
2617 }
2618 allocation = new_space->AllocateRaw(size);
2619 ASSERT(!allocation->IsFailure());
2620 }
2621 Object* target = allocation->ToObjectUnchecked();
2622
2623 MigrateObject(HeapObject::cast(target)->address(),
2624 object->address(),
2259 size, 2625 size,
2260 false); 2626 NEW_SPACE);
2261 } else { 2627 } else {
2262 // Process the dead object before we write a NULL into its header. 2628 // Process the dead object before we write a NULL into its header.
2263 LiveObjectList::ProcessNonLive(object); 2629 LiveObjectList::ProcessNonLive(object);
2264 2630
2265 size = object->Size(); 2631 // Mark dead objects in the new space with null in their map field.
2266 Memory::Address_at(current) = NULL; 2632 Memory::Address_at(object->address()) = NULL;
2267 } 2633 }
2268 } 2634 }
2635
2636 heap_->IncrementYoungSurvivorsCounter(survivors_size);
2637 new_space->set_age_mark(new_space->top());
2638 }
2639
2640
2641 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
2642 AlwaysAllocateScope always_allocate;
2643 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
2644 ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
2645 MarkBit::CellType* cells = p->markbits()->cells();
2646 p->MarkSweptPrecisely();
2647
2648 int last_cell_index =
2649 Bitmap::IndexToCell(
2650 Bitmap::CellAlignIndex(
2651 p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
2652
2653 int cell_index = Page::kFirstUsedCell;
2654 Address cell_base = p->ObjectAreaStart();
2655 int offsets[16];
2656
2657 for (cell_index = Page::kFirstUsedCell;
2658 cell_index < last_cell_index;
2659 cell_index++, cell_base += 32 * kPointerSize) {
2660 ASSERT((unsigned)cell_index ==
2661 Bitmap::IndexToCell(
2662 Bitmap::CellAlignIndex(
2663 p->AddressToMarkbitIndex(cell_base))));
2664 if (cells[cell_index] == 0) continue;
2665
2666 int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
2667 for (int i = 0; i < live_objects; i++) {
2668 Address object_addr = cell_base + offsets[i] * kPointerSize;
2669 HeapObject* object = HeapObject::FromAddress(object_addr);
2670 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
2671
2672 int size = object->Size();
2673
2674 MaybeObject* target = space->AllocateRaw(size);
2675 if (target->IsFailure()) {
2676 // OS refused to give us memory.
2677 V8::FatalProcessOutOfMemory("Evacuation");
2678 return;
2679 }
2680
2681 Object* target_object = target->ToObjectUnchecked();
2682
2683 MigrateObject(HeapObject::cast(target_object)->address(),
2684 object_addr,
2685 size,
2686 space->identity());
2687 ASSERT(object->map_word().IsForwardingAddress());
2688 }
2689
2690 // Clear marking bits for current cell.
2691 cells[cell_index] = 0;
2692 }
2693 p->ResetLiveBytes();
2694 }
2695
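// Both EvacuateLiveObjectsFromPage above and the precise sweeper below rely
// on MarkWordToObjectStarts: each 32-bit cell of the mark bitmap covers 32
// pointer-sized words, and every set bit marks the start of a live object.
// A standalone sketch of decoding one cell into word offsets; the real
// MarkWordToObjectStarts may be implemented differently, but this is the
// contract its callers rely on. Since heap objects occupy at least two words,
// at most 16 bits per cell can be set, which is why a 16-element scratch
// array suffices in the callers.
#include <cstdint>

int MarkWordToStarts(uint32_t cell, int out_offsets[16]) {
  int count = 0;
  for (int bit = 0; bit < 32; bit++) {
    if (cell & (1u << bit)) {
      out_offsets[count++] = bit;   // offset in pointer-sized words
    }
  }
  return count;
}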
2696
2697 void MarkCompactCollector::EvacuatePages() {
2698 int npages = evacuation_candidates_.length();
2699 for (int i = 0; i < npages; i++) {
2700 Page* p = evacuation_candidates_[i];
2701 ASSERT(p->IsEvacuationCandidate() ||
2702 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
2703 if (p->IsEvacuationCandidate()) {
2704 // During compaction we might have to request a new page.
 2705 // Check that the space still has room for that.
2706 if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
2707 EvacuateLiveObjectsFromPage(p);
2708 } else {
 2709 // Without room for expansion, evacuation is not guaranteed to succeed.
2710 // Pessimistically abandon unevacuated pages.
2711 for (int j = i; j < npages; j++) {
2712 Page* page = evacuation_candidates_[j];
2713 slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
2714 page->ClearEvacuationCandidate();
2715 page->SetFlag(Page::RESCAN_ON_EVACUATION);
2716 }
2717 return;
2718 }
2719 }
2720 }
2721 }
2722
2723
2724 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
2725 public:
2726 virtual Object* RetainAs(Object* object) {
2727 if (object->IsHeapObject()) {
2728 HeapObject* heap_object = HeapObject::cast(object);
2729 MapWord map_word = heap_object->map_word();
2730 if (map_word.IsForwardingAddress()) {
2731 return map_word.ToForwardingAddress();
2732 }
2733 }
2734 return object;
2735 }
2736 };
2737
2738
2739 static inline void UpdateSlot(ObjectVisitor* v,
2740 SlotsBuffer::SlotType slot_type,
2741 Address addr) {
2742 switch (slot_type) {
2743 case SlotsBuffer::CODE_TARGET_SLOT: {
2744 RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
2745 rinfo.Visit(v);
2746 break;
2747 }
2748 case SlotsBuffer::CODE_ENTRY_SLOT: {
2749 v->VisitCodeEntry(addr);
2750 break;
2751 }
2752 case SlotsBuffer::RELOCATED_CODE_OBJECT: {
2753 HeapObject* obj = HeapObject::FromAddress(addr);
2754 Code::cast(obj)->CodeIterateBody(v);
2755 break;
2756 }
2757 case SlotsBuffer::DEBUG_TARGET_SLOT: {
2758 RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
2759 if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
2760 break;
2761 }
2762 case SlotsBuffer::JS_RETURN_SLOT: {
2763 RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
2764 if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
2765 break;
2766 }
2767 default:
2768 UNREACHABLE();
2769 break;
2770 }
2771 }
2772
2773
2774 enum SweepingMode {
2775 SWEEP_ONLY,
2776 SWEEP_AND_VISIT_LIVE_OBJECTS
2777 };
2778
2779
2780 enum SkipListRebuildingMode {
2781 REBUILD_SKIP_LIST,
2782 IGNORE_SKIP_LIST
2783 };
2784
2785
2786 // Sweep a space precisely. After this has been done the space can
2787 // be iterated precisely, hitting only the live objects. Code space
2788 // is always swept precisely because we want to be able to iterate
2789 // over it. Map space is swept precisely, because it is not compacted.
2790 // Slots in live objects pointing into evacuation candidates are updated
2791 // if requested.
2792 template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
2793 static void SweepPrecisely(PagedSpace* space,
2794 Page* p,
2795 ObjectVisitor* v) {
2796 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
2797 ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
2798 space->identity() == CODE_SPACE);
2799 ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
2800
2801 MarkBit::CellType* cells = p->markbits()->cells();
2802 p->MarkSweptPrecisely();
2803
2804 int last_cell_index =
2805 Bitmap::IndexToCell(
2806 Bitmap::CellAlignIndex(
2807 p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
2808
2809 int cell_index = Page::kFirstUsedCell;
2810 Address free_start = p->ObjectAreaStart();
2811 ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
2812 Address object_address = p->ObjectAreaStart();
2813 int offsets[16];
2814
2815 SkipList* skip_list = p->skip_list();
2816 int curr_region = -1;
2817 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
2818 skip_list->Clear();
2819 }
2820
2821 for (cell_index = Page::kFirstUsedCell;
2822 cell_index < last_cell_index;
2823 cell_index++, object_address += 32 * kPointerSize) {
2824 ASSERT((unsigned)cell_index ==
2825 Bitmap::IndexToCell(
2826 Bitmap::CellAlignIndex(
2827 p->AddressToMarkbitIndex(object_address))));
2828 int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
2829 int live_index = 0;
2830 for ( ; live_objects != 0; live_objects--) {
2831 Address free_end = object_address + offsets[live_index++] * kPointerSize;
2832 if (free_end != free_start) {
2833 space->Free(free_start, static_cast<int>(free_end - free_start));
2834 }
2835 HeapObject* live_object = HeapObject::FromAddress(free_end);
2836 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
2837 Map* map = live_object->map();
2838 int size = live_object->SizeFromMap(map);
2839 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
2840 live_object->IterateBody(map->instance_type(), size, v);
2841 }
2842 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
2843 int new_region_start =
2844 SkipList::RegionNumber(free_end);
2845 int new_region_end =
2846 SkipList::RegionNumber(free_end + size - kPointerSize);
2847 if (new_region_start != curr_region ||
2848 new_region_end != curr_region) {
2849 skip_list->AddObject(free_end, size);
2850 curr_region = new_region_end;
2851 }
2852 }
2853 free_start = free_end + size;
2854 }
2855 // Clear marking bits for current cell.
2856 cells[cell_index] = 0;
2857 }
2858 if (free_start != p->ObjectAreaEnd()) {
2859 space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
2860 }
2861 p->ResetLiveBytes();
2862 }
2863
2864
2865 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
2866 Page* p = Page::FromAddress(code->address());
2867
2868 if (p->IsEvacuationCandidate() ||
2869 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
2870 return false;
2871 }
2872
2873 Address code_start = code->address();
2874 Address code_end = code_start + code->Size();
2875
2876 uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
2877 uint32_t end_index =
2878 MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
2879
2880 Bitmap* b = p->markbits();
2881
2882 MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
2883 MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
2884
2885 MarkBit::CellType* start_cell = start_mark_bit.cell();
2886 MarkBit::CellType* end_cell = end_mark_bit.cell();
2887
2888 if (value) {
2889 MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
2890 MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
2891
2892 if (start_cell == end_cell) {
2893 *start_cell |= start_mask & end_mask;
2894 } else {
2895 *start_cell |= start_mask;
2896 for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
2897 *cell = ~0;
2898 }
2899 *end_cell |= end_mask;
2900 }
2901 } else {
2902 for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
2903 *cell = 0;
2904 }
2905 }
2906
2907 return true;
2908 }
2909
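// SetMarkBitsUnderInvalidatedCode above sets or clears a whole range of mark
// bits at once by working on 32-bit cells: the first cell gets a mask covering
// everything from the range's low end upwards, the last cell a mask covering
// everything up to the range's high end, and the cells in between are filled
// wholesale. A standalone sketch of that masking over a plain array of cells,
// with the range given as absolute bit indices; as in the code above, the
// clearing path simply zeroes whole cells.
#include <cstdint>

void SetBitRange(uint32_t* cells, uint32_t start_bit, uint32_t end_bit,
                 bool value) {
  uint32_t* start_cell = &cells[start_bit >> 5];
  uint32_t* end_cell = &cells[end_bit >> 5];
  uint32_t start_bit_mask = 1u << (start_bit & 31);
  uint32_t end_bit_mask = 1u << (end_bit & 31);

  if (!value) {
    for (uint32_t* cell = start_cell; cell <= end_cell; cell++) *cell = 0;
    return;
  }

  uint32_t start_mask = ~(start_bit_mask - 1);   // bits >= start_bit
  uint32_t end_mask = (end_bit_mask << 1) - 1;   // bits <= end_bit
  if (start_cell == end_cell) {
    *start_cell |= start_mask & end_mask;
  } else {
    *start_cell |= start_mask;
    for (uint32_t* cell = start_cell + 1; cell < end_cell; cell++) {
      *cell = ~0u;
    }
    *end_cell |= end_mask;
  }
}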
2910
2911 static bool IsOnInvalidatedCodeObject(Address addr) {
2912 // We did not record any slots in large objects thus
2913 // we can safely go to the page from the slot address.
2914 Page* p = Page::FromAddress(addr);
2915
2916 // First check owner's identity because old pointer and old data spaces
2917 // are swept lazily and might still have non-zero mark-bits on some
2918 // pages.
2919 if (p->owner()->identity() != CODE_SPACE) return false;
2920
2921 // In code space only bits on evacuation candidates (but we don't record
2922 // any slots on them) and under invalidated code objects are non-zero.
2923 MarkBit mark_bit =
2924 p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
2925
2926 return mark_bit.Get();
2927 }
2928
2929
2930 void MarkCompactCollector::InvalidateCode(Code* code) {
2931 if (heap_->incremental_marking()->IsCompacting() &&
2932 !ShouldSkipEvacuationSlotRecording(code)) {
2933 ASSERT(compacting_);
2934
 2935 // If the object is white then no slots were recorded on it yet.
2936 MarkBit mark_bit = Marking::MarkBitFrom(code);
2937 if (Marking::IsWhite(mark_bit)) return;
2938
2939 invalidated_code_.Add(code);
2940 }
2941 }
2942
2943
2944 bool MarkCompactCollector::MarkInvalidatedCode() {
2945 bool code_marked = false;
2946
2947 int length = invalidated_code_.length();
2948 for (int i = 0; i < length; i++) {
2949 Code* code = invalidated_code_[i];
2950
2951 if (SetMarkBitsUnderInvalidatedCode(code, true)) {
2952 code_marked = true;
2953 }
2954 }
2955
2956 return code_marked;
2957 }
2958
2959
2960 void MarkCompactCollector::RemoveDeadInvalidatedCode() {
2961 int length = invalidated_code_.length();
2962 for (int i = 0; i < length; i++) {
2963 if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
2964 }
2965 }
2966
2967
2968 void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
2969 int length = invalidated_code_.length();
2970 for (int i = 0; i < length; i++) {
2971 Code* code = invalidated_code_[i];
2972 if (code != NULL) {
2973 code->Iterate(visitor);
2974 SetMarkBitsUnderInvalidatedCode(code, false);
2975 }
2976 }
2977 invalidated_code_.Rewind(0);
2978 }
2979
2980
2981 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
2982 bool code_slots_filtering_required = MarkInvalidatedCode();
2983
2984 EvacuateNewSpace();
2985 EvacuatePages();
2269 2986
2270 // Second pass: find pointers to new space and update them. 2987 // Second pass: find pointers to new space and update them.
2271 PointersToNewGenUpdatingVisitor updating_visitor(heap); 2988 PointersUpdatingVisitor updating_visitor(heap());
2272 2989
2273 // Update pointers in to space. 2990 // Update pointers in to space.
2274 Address current = space->bottom(); 2991 SemiSpaceIterator to_it(heap()->new_space()->bottom(),
2275 while (current < space->top()) { 2992 heap()->new_space()->top());
2276 HeapObject* object = HeapObject::FromAddress(current); 2993 for (HeapObject* object = to_it.Next();
2277 current += 2994 object != NULL;
2278 StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(), 2995 object = to_it.Next()) {
2279 object); 2996 Map* map = object->map();
2997 object->IterateBody(map->instance_type(),
2998 object->SizeFromMap(map),
2999 &updating_visitor);
2280 } 3000 }
2281 3001
2282 // Update roots. 3002 // Update roots.
2283 heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); 3003 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
2284 LiveObjectList::IterateElements(&updating_visitor); 3004 LiveObjectList::IterateElements(&updating_visitor);
2285 3005
2286 // Update pointers in old spaces. 3006 {
2287 heap->IterateDirtyRegions(heap->old_pointer_space(), 3007 StoreBufferRebuildScope scope(heap_,
2288 &Heap::IteratePointersInDirtyRegion, 3008 heap_->store_buffer(),
2289 &UpdatePointerToNewGen, 3009 &Heap::ScavengeStoreBufferCallback);
2290 heap->WATERMARK_SHOULD_BE_VALID); 3010 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
2291 3011 }
2292 heap->lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen); 3012
3013 SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3014 migration_slots_buffer_,
3015 code_slots_filtering_required);
3016 if (FLAG_trace_fragmentation) {
3017 PrintF(" migration slots buffer: %d\n",
3018 SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3019 }
3020
3021 if (compacting_ && was_marked_incrementally_) {
3022 // It's difficult to filter out slots recorded for large objects.
3023 LargeObjectIterator it(heap_->lo_space());
3024 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3025 // LargeObjectSpace is not swept yet thus we have to skip
3026 // dead objects explicitly.
3027 if (!IsMarked(obj)) continue;
3028
3029 Page* p = Page::FromAddress(obj->address());
3030 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3031 obj->Iterate(&updating_visitor);
3032 p->ClearFlag(Page::RESCAN_ON_EVACUATION);
3033 }
3034 }
3035 }
3036
3037 int npages = evacuation_candidates_.length();
3038 for (int i = 0; i < npages; i++) {
3039 Page* p = evacuation_candidates_[i];
3040 ASSERT(p->IsEvacuationCandidate() ||
3041 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3042
3043 if (p->IsEvacuationCandidate()) {
3044 SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3045 p->slots_buffer(),
3046 code_slots_filtering_required);
3047 if (FLAG_trace_fragmentation) {
3048 PrintF(" page %p slots buffer: %d\n",
3049 reinterpret_cast<void*>(p),
3050 SlotsBuffer::SizeOfChain(p->slots_buffer()));
3051 }
3052
 3053 // Important: the skip list should be cleared only after roots have been
 3054 // updated, because root iteration traverses the stack and might have to
 3055 // find code objects from a non-updated pc pointing into an evacuation candidate.
3056 SkipList* list = p->skip_list();
3057 if (list != NULL) list->Clear();
3058 } else {
3059 if (FLAG_gc_verbose) {
3060 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3061 reinterpret_cast<intptr_t>(p));
3062 }
3063 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3064 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3065
3066 switch (space->identity()) {
3067 case OLD_DATA_SPACE:
3068 SweepConservatively(space, p);
3069 break;
3070 case OLD_POINTER_SPACE:
3071 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
3072 space, p, &updating_visitor);
3073 break;
3074 case CODE_SPACE:
3075 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
3076 space, p, &updating_visitor);
3077 break;
3078 default:
3079 UNREACHABLE();
3080 break;
3081 }
3082 }
3083 }
2293 3084
2294 // Update pointers from cells. 3085 // Update pointers from cells.
2295 HeapObjectIterator cell_iterator(heap->cell_space()); 3086 HeapObjectIterator cell_iterator(heap_->cell_space());
2296 for (HeapObject* cell = cell_iterator.next(); 3087 for (HeapObject* cell = cell_iterator.Next();
2297 cell != NULL; 3088 cell != NULL;
2298 cell = cell_iterator.next()) { 3089 cell = cell_iterator.Next()) {
2299 if (cell->IsJSGlobalPropertyCell()) { 3090 if (cell->IsJSGlobalPropertyCell()) {
2300 Address value_address = 3091 Address value_address =
2301 reinterpret_cast<Address>(cell) + 3092 reinterpret_cast<Address>(cell) +
2302 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); 3093 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
2303 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); 3094 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
2304 } 3095 }
2305 } 3096 }
2306 3097
2307 // Update pointer from the global contexts list. 3098 // Update pointer from the global contexts list.
2308 updating_visitor.VisitPointer(heap->global_contexts_list_address()); 3099 updating_visitor.VisitPointer(heap_->global_contexts_list_address());
3100
3101 heap_->symbol_table()->Iterate(&updating_visitor);
2309 3102
2310 // Update pointers from external string table. 3103 // Update pointers from external string table.
2311 heap->UpdateNewSpaceReferencesInExternalStringTable( 3104 heap_->UpdateReferencesInExternalStringTable(
2312 &UpdateNewSpaceReferenceInExternalStringTableEntry); 3105 &UpdateReferenceInExternalStringTableEntry);
2313
2314 // All pointers were updated. Update auxiliary allocation info.
2315 heap->IncrementYoungSurvivorsCounter(survivors_size);
2316 space->set_age_mark(space->top());
2317 3106
2318 // Update JSFunction pointers from the runtime profiler. 3107 // Update JSFunction pointers from the runtime profiler.
2319 heap->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge(); 3108 heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
2320 } 3109 &updating_visitor);
2321 3110
2322 3111 EvacuationWeakObjectRetainer evacuation_object_retainer;
2323 static void SweepSpace(Heap* heap, PagedSpace* space) { 3112 heap()->ProcessWeakReferences(&evacuation_object_retainer);
2324 PageIterator it(space, PageIterator::PAGES_IN_USE); 3113
2325 3114 // Visit invalidated code (we ignored all slots on it) and clear mark-bits
 2326 // During sweeping of paged space we are trying to find the longest sequences 3115 // under it.
2327 // of pages without live objects and free them (instead of putting them on 3116 ProcessInvalidatedCode(&updating_visitor);
2328 // the free list). 3117
2329 3118 #ifdef DEBUG
2330 // Page preceding current. 3119 if (FLAG_verify_heap) {
2331 Page* prev = Page::FromAddress(NULL); 3120 VerifyEvacuation(heap_);
2332 3121 }
2333 // First empty page in a sequence. 3122 #endif
2334 Page* first_empty_page = Page::FromAddress(NULL); 3123
2335 3124 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
2336 // Page preceding first empty page. 3125 ASSERT(migration_slots_buffer_ == NULL);
2337 Page* prec_first_empty_page = Page::FromAddress(NULL); 3126 for (int i = 0; i < npages; i++) {
2338 3127 Page* p = evacuation_candidates_[i];
2339 // If last used page of space ends with a sequence of dead objects 3128 if (!p->IsEvacuationCandidate()) continue;
 2340 // we can adjust allocation top instead of putting this free area into 3129 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
2341 // the free list. Thus during sweeping we keep track of such areas 3130 space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
2342 // and defer their deallocation until the sweeping of the next page 3131 p->set_scan_on_scavenge(false);
2343 // is done: if one of the next pages contains live objects we have 3132 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
 2344 // to put such an area into the free list. 3133 p->ClearEvacuationCandidate();
2345 Address last_free_start = NULL; 3134 }
2346 int last_free_size = 0; 3135 evacuation_candidates_.Rewind(0);
3136 compacting_ = false;
3137 }
3138
3139
3140 static const int kStartTableEntriesPerLine = 5;
3141 static const int kStartTableLines = 171;
3142 static const int kStartTableInvalidLine = 127;
3143 static const int kStartTableUnusedEntry = 126;
3144
3145 #define _ kStartTableUnusedEntry
3146 #define X kStartTableInvalidLine
3147 // Mark-bit to object start offset table.
3148 //
3149 // The line is indexed by the mark bits in a byte. The first number on
3150 // the line describes the number of live object starts for the line and the
3151 // other numbers on the line describe the offsets (in words) of the object
3152 // starts.
3153 //
3154 // Since objects are at least 2 words large we don't have entries for two
3155 // consecutive 1 bits. All entries after 170 have at least 2 consecutive bits.
3156 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
3157 0, _, _, _, _, // 0
3158 1, 0, _, _, _, // 1
3159 1, 1, _, _, _, // 2
3160 X, _, _, _, _, // 3
3161 1, 2, _, _, _, // 4
3162 2, 0, 2, _, _, // 5
3163 X, _, _, _, _, // 6
3164 X, _, _, _, _, // 7
3165 1, 3, _, _, _, // 8
3166 2, 0, 3, _, _, // 9
3167 2, 1, 3, _, _, // 10
3168 X, _, _, _, _, // 11
3169 X, _, _, _, _, // 12
3170 X, _, _, _, _, // 13
3171 X, _, _, _, _, // 14
3172 X, _, _, _, _, // 15
3173 1, 4, _, _, _, // 16
3174 2, 0, 4, _, _, // 17
3175 2, 1, 4, _, _, // 18
3176 X, _, _, _, _, // 19
3177 2, 2, 4, _, _, // 20
3178 3, 0, 2, 4, _, // 21
3179 X, _, _, _, _, // 22
3180 X, _, _, _, _, // 23
3181 X, _, _, _, _, // 24
3182 X, _, _, _, _, // 25
3183 X, _, _, _, _, // 26
3184 X, _, _, _, _, // 27
3185 X, _, _, _, _, // 28
3186 X, _, _, _, _, // 29
3187 X, _, _, _, _, // 30
3188 X, _, _, _, _, // 31
3189 1, 5, _, _, _, // 32
3190 2, 0, 5, _, _, // 33
3191 2, 1, 5, _, _, // 34
3192 X, _, _, _, _, // 35
3193 2, 2, 5, _, _, // 36
3194 3, 0, 2, 5, _, // 37
3195 X, _, _, _, _, // 38
3196 X, _, _, _, _, // 39
3197 2, 3, 5, _, _, // 40
3198 3, 0, 3, 5, _, // 41
3199 3, 1, 3, 5, _, // 42
3200 X, _, _, _, _, // 43
3201 X, _, _, _, _, // 44
3202 X, _, _, _, _, // 45
3203 X, _, _, _, _, // 46
3204 X, _, _, _, _, // 47
3205 X, _, _, _, _, // 48
3206 X, _, _, _, _, // 49
3207 X, _, _, _, _, // 50
3208 X, _, _, _, _, // 51
3209 X, _, _, _, _, // 52
3210 X, _, _, _, _, // 53
3211 X, _, _, _, _, // 54
3212 X, _, _, _, _, // 55
3213 X, _, _, _, _, // 56
3214 X, _, _, _, _, // 57
3215 X, _, _, _, _, // 58
3216 X, _, _, _, _, // 59
3217 X, _, _, _, _, // 60
3218 X, _, _, _, _, // 61
3219 X, _, _, _, _, // 62
3220 X, _, _, _, _, // 63
3221 1, 6, _, _, _, // 64
3222 2, 0, 6, _, _, // 65
3223 2, 1, 6, _, _, // 66
3224 X, _, _, _, _, // 67
3225 2, 2, 6, _, _, // 68
3226 3, 0, 2, 6, _, // 69
3227 X, _, _, _, _, // 70
3228 X, _, _, _, _, // 71
3229 2, 3, 6, _, _, // 72
3230 3, 0, 3, 6, _, // 73
3231 3, 1, 3, 6, _, // 74
3232 X, _, _, _, _, // 75
3233 X, _, _, _, _, // 76
3234 X, _, _, _, _, // 77
3235 X, _, _, _, _, // 78
3236 X, _, _, _, _, // 79
3237 2, 4, 6, _, _, // 80
3238 3, 0, 4, 6, _, // 81
3239 3, 1, 4, 6, _, // 82
3240 X, _, _, _, _, // 83
3241 3, 2, 4, 6, _, // 84
3242 4, 0, 2, 4, 6, // 85
3243 X, _, _, _, _, // 86
3244 X, _, _, _, _, // 87
3245 X, _, _, _, _, // 88
3246 X, _, _, _, _, // 89
3247 X, _, _, _, _, // 90
3248 X, _, _, _, _, // 91
3249 X, _, _, _, _, // 92
3250 X, _, _, _, _, // 93
3251 X, _, _, _, _, // 94
3252 X, _, _, _, _, // 95
3253 X, _, _, _, _, // 96
3254 X, _, _, _, _, // 97
3255 X, _, _, _, _, // 98
3256 X, _, _, _, _, // 99
3257 X, _, _, _, _, // 100
3258 X, _, _, _, _, // 101
3259 X, _, _, _, _, // 102
3260 X, _, _, _, _, // 103
3261 X, _, _, _, _, // 104
3262 X, _, _, _, _, // 105
3263 X, _, _, _, _, // 106
3264 X, _, _, _, _, // 107
3265 X, _, _, _, _, // 108
3266 X, _, _, _, _, // 109
3267 X, _, _, _, _, // 110
3268 X, _, _, _, _, // 111
3269 X, _, _, _, _, // 112
3270 X, _, _, _, _, // 113
3271 X, _, _, _, _, // 114
3272 X, _, _, _, _, // 115
3273 X, _, _, _, _, // 116
3274 X, _, _, _, _, // 117
3275 X, _, _, _, _, // 118
3276 X, _, _, _, _, // 119
3277 X, _, _, _, _, // 120
3278 X, _, _, _, _, // 121
3279 X, _, _, _, _, // 122
3280 X, _, _, _, _, // 123
3281 X, _, _, _, _, // 124
3282 X, _, _, _, _, // 125
3283 X, _, _, _, _, // 126
3284 X, _, _, _, _, // 127
3285 1, 7, _, _, _, // 128
3286 2, 0, 7, _, _, // 129
3287 2, 1, 7, _, _, // 130
3288 X, _, _, _, _, // 131
3289 2, 2, 7, _, _, // 132
3290 3, 0, 2, 7, _, // 133
3291 X, _, _, _, _, // 134
3292 X, _, _, _, _, // 135
3293 2, 3, 7, _, _, // 136
3294 3, 0, 3, 7, _, // 137
3295 3, 1, 3, 7, _, // 138
3296 X, _, _, _, _, // 139
3297 X, _, _, _, _, // 140
3298 X, _, _, _, _, // 141
3299 X, _, _, _, _, // 142
3300 X, _, _, _, _, // 143
3301 2, 4, 7, _, _, // 144
3302 3, 0, 4, 7, _, // 145
3303 3, 1, 4, 7, _, // 146
3304 X, _, _, _, _, // 147
3305 3, 2, 4, 7, _, // 148
3306 4, 0, 2, 4, 7, // 149
3307 X, _, _, _, _, // 150
3308 X, _, _, _, _, // 151
3309 X, _, _, _, _, // 152
3310 X, _, _, _, _, // 153
3311 X, _, _, _, _, // 154
3312 X, _, _, _, _, // 155
3313 X, _, _, _, _, // 156
3314 X, _, _, _, _, // 157
3315 X, _, _, _, _, // 158
3316 X, _, _, _, _, // 159
3317 2, 5, 7, _, _, // 160
3318 3, 0, 5, 7, _, // 161
3319 3, 1, 5, 7, _, // 162
3320 X, _, _, _, _, // 163
3321 3, 2, 5, 7, _, // 164
3322 4, 0, 2, 5, 7, // 165
3323 X, _, _, _, _, // 166
3324 X, _, _, _, _, // 167
3325 3, 3, 5, 7, _, // 168
3326 4, 0, 3, 5, 7, // 169
3327 4, 1, 3, 5, 7 // 170
3328 };
3329 #undef _
3330 #undef X
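As a quick illustration of how kStartTable is read (a standalone sketch, not part of the patch): the mark-bit byte 145 is 0b10010001, i.e. bits 0, 4 and 7 are set, and its table line above reads 3, 0, 4, 7, meaning three object starts at word offsets 0, 4 and 7.

// Standalone sketch: the offsets stored on a kStartTable line are exactly the
// positions of the set bits in the indexing byte (145 == 0b10010001 here).
#include <cassert>

int main() {
  const int line[] = {3, 0, 4, 7};  // row 145 of the table, unused entries omitted
  const unsigned byte = 145;
  for (int i = 0; i < line[0]; i++) {
    assert((byte >> line[1 + i]) & 1);  // each listed offset is a set bit
  }
  return 0;
}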
3331
3332
3333 // Takes a word of mark bits. Returns the number of objects that start in the
 3334 // range. Puts the word offsets of the object starts in the supplied array.
3335 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
3336 int objects = 0;
3337 int offset = 0;
3338
3339 // No consecutive 1 bits.
3340 ASSERT((mark_bits & 0x180) != 0x180);
3341 ASSERT((mark_bits & 0x18000) != 0x18000);
3342 ASSERT((mark_bits & 0x1800000) != 0x1800000);
3343
3344 while (mark_bits != 0) {
3345 int byte = (mark_bits & 0xff);
3346 mark_bits >>= 8;
3347 if (byte != 0) {
3348 ASSERT(byte < kStartTableLines); // No consecutive 1 bits.
3349 char* table = kStartTable + byte * kStartTableEntriesPerLine;
3350 int objects_in_these_8_words = table[0];
3351 ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
3352 ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
3353 for (int i = 0; i < objects_in_these_8_words; i++) {
3354 starts[objects++] = offset + table[1 + i];
3355 }
3356 }
3357 offset += 8;
3358 }
3359 return objects;
3360 }
3361
3362
3363 static inline Address DigestFreeStart(Address approximate_free_start,
3364 uint32_t free_start_cell) {
3365 ASSERT(free_start_cell != 0);
3366
3367 // No consecutive 1 bits.
3368 ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
3369
3370 int offsets[16];
3371 uint32_t cell = free_start_cell;
3372 int offset_of_last_live;
3373 if ((cell & 0x80000000u) != 0) {
3374 // This case would overflow below.
3375 offset_of_last_live = 31;
3376 } else {
3377 // Remove all but one bit, the most significant. This is an optimization
3378 // that may or may not be worthwhile.
3379 cell |= cell >> 16;
3380 cell |= cell >> 8;
3381 cell |= cell >> 4;
3382 cell |= cell >> 2;
3383 cell |= cell >> 1;
3384 cell = (cell + 1) >> 1;
3385 int live_objects = MarkWordToObjectStarts(cell, offsets);
3386 ASSERT(live_objects == 1);
3387 offset_of_last_live = offsets[live_objects - 1];
3388 }
3389 Address last_live_start =
3390 approximate_free_start + offset_of_last_live * kPointerSize;
3391 HeapObject* last_live = HeapObject::FromAddress(last_live_start);
3392 Address free_start = last_live_start + last_live->Size();
3393 return free_start;
3394 }
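The shift-and-or cascade in DigestFreeStart is the usual smear trick for isolating the most significant set bit; a standalone check with an arbitrary example value (not taken from the CL):

// Standalone sketch of the most-significant-bit isolation used above
// (valid for any value without bit 31 set, as guarded in DigestFreeStart).
#include <cassert>
#include <cstdint>

int main() {
  uint32_t cell = 0x00012040u;  // arbitrary example: bits 6, 13 and 16 set
  cell |= cell >> 16;
  cell |= cell >> 8;
  cell |= cell >> 4;
  cell |= cell >> 2;
  cell |= cell >> 1;            // now all bits below the MSB are also set
  cell = (cell + 1) >> 1;       // keep only the MSB
  assert(cell == 0x00010000u);  // bit 16 was the most significant set bit
  return 0;
}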
3395
3396
3397 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
3398 ASSERT(cell != 0);
3399
3400 // No consecutive 1 bits.
3401 ASSERT((cell & (cell << 1)) == 0);
3402
3403 int offsets[16];
3404 if (cell == 0x80000000u) { // Avoid overflow below.
3405 return block_address + 31 * kPointerSize;
3406 }
3407 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
3408 ASSERT((first_set_bit & cell) == first_set_bit);
3409 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
3410 ASSERT(live_objects == 1);
3411 USE(live_objects);
3412 return block_address + offsets[0] * kPointerSize;
3413 }
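Similarly, the expression ((cell ^ (cell - 1)) + 1) >> 1 in StartOfLiveObject isolates the lowest set bit, the classic cell & -cell identity; a standalone check with an invented value:

// Standalone sketch: isolating the lowest set bit as StartOfLiveObject does.
// The 0x80000000 case is excluded, matching the early return above.
#include <cassert>
#include <cstdint>

int main() {
  uint32_t cell = 0x00500200u;  // arbitrary non-zero example
  uint32_t lowest = ((cell ^ (cell - 1)) + 1) >> 1;
  assert(lowest == 0x00000200u);
  assert(lowest == (cell & (0u - cell)));  // equivalent two's-complement form
  return 0;
}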
3414
3415
3416 // Sweeps a space conservatively. After this has been done the larger free
3417 // spaces have been put on the free list and the smaller ones have been
3418 // ignored and left untouched. A free space is always either ignored or put
3419 // on the free list, never split up into two parts. This is important
3420 // because it means that any FreeSpace maps left actually describe a region of
3421 // memory that can be ignored when scanning. Dead objects other than free
3422 // spaces will not contain the free space map.
3423 intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
3424 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3425 MarkBit::CellType* cells = p->markbits()->cells();
3426 p->MarkSweptConservatively();
3427
3428 int last_cell_index =
3429 Bitmap::IndexToCell(
3430 Bitmap::CellAlignIndex(
3431 p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
3432
3433 int cell_index = Page::kFirstUsedCell;
3434 intptr_t freed_bytes = 0;
3435
3436 // This is the start of the 32 word block that we are currently looking at.
3437 Address block_address = p->ObjectAreaStart();
3438
3439 // Skip over all the dead objects at the start of the page and mark them free.
3440 for (cell_index = Page::kFirstUsedCell;
3441 cell_index < last_cell_index;
3442 cell_index++, block_address += 32 * kPointerSize) {
3443 if (cells[cell_index] != 0) break;
3444 }
3445 size_t size = block_address - p->ObjectAreaStart();
3446 if (cell_index == last_cell_index) {
3447 freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(),
3448 static_cast<int>(size)));
3449 ASSERT_EQ(0, p->LiveBytes());
3450 return freed_bytes;
3451 }
3452 // Grow the size of the start-of-page free space a little to get up to the
3453 // first live object.
3454 Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
3455 // Free the first free space.
3456 size = free_end - p->ObjectAreaStart();
3457 freed_bytes += space->Free(p->ObjectAreaStart(),
3458 static_cast<int>(size));
3459 // The start of the current free area is represented in undigested form by
3460 // the address of the last 32-word section that contained a live object and
3461 // the marking bitmap for that cell, which describes where the live object
3462 // started. Unless we find a large free space in the bitmap we will not
3463 // digest this pair into a real address. We start the iteration here at the
3464 // first word in the marking bit map that indicates a live object.
3465 Address free_start = block_address;
3466 uint32_t free_start_cell = cells[cell_index];
3467
3468 for ( ;
3469 cell_index < last_cell_index;
3470 cell_index++, block_address += 32 * kPointerSize) {
3471 ASSERT((unsigned)cell_index ==
3472 Bitmap::IndexToCell(
3473 Bitmap::CellAlignIndex(
3474 p->AddressToMarkbitIndex(block_address))));
3475 uint32_t cell = cells[cell_index];
3476 if (cell != 0) {
3477 // We have a live object. Check approximately whether it is more than 32
 3478 // words away from the last live object.
3479 if (block_address - free_start > 32 * kPointerSize) {
3480 free_start = DigestFreeStart(free_start, free_start_cell);
3481 if (block_address - free_start > 32 * kPointerSize) {
3482 // Now that we know the exact start of the free space it still looks
 3483 // like we have a large enough free space to be worth bothering with,
3484 // so now we need to find the start of the first live object at the
3485 // end of the free space.
3486 free_end = StartOfLiveObject(block_address, cell);
3487 freed_bytes += space->Free(free_start,
3488 static_cast<int>(free_end - free_start));
3489 }
3490 }
3491 // Update our undigested record of where the current free area started.
3492 free_start = block_address;
3493 free_start_cell = cell;
3494 // Clear marking bits for current cell.
3495 cells[cell_index] = 0;
3496 }
3497 }
3498
3499 // Handle the free space at the end of the page.
3500 if (block_address - free_start > 32 * kPointerSize) {
3501 free_start = DigestFreeStart(free_start, free_start_cell);
3502 freed_bytes += space->Free(free_start,
3503 static_cast<int>(block_address - free_start));
3504 }
3505
3506 p->ResetLiveBytes();
3507 return freed_bytes;
3508 }
3509
3510
3511 void MarkCompactCollector::SweepSpace(PagedSpace* space,
3512 SweeperType sweeper) {
3513 space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
3514 sweeper == LAZY_CONSERVATIVE);
3515
3516 space->ClearStats();
3517
3518 PageIterator it(space);
3519
3520 intptr_t freed_bytes = 0;
3521 intptr_t newspace_size = space->heap()->new_space()->Size();
3522 bool lazy_sweeping_active = false;
3523 bool unused_page_present = false;
2347 3524
2348 while (it.has_next()) { 3525 while (it.has_next()) {
2349 Page* p = it.next(); 3526 Page* p = it.next();
2350 3527
2351 bool is_previous_alive = true; 3528 // Clear sweeping flags indicating that marking bits are still intact.
2352 Address free_start = NULL; 3529 p->ClearSweptPrecisely();
2353 HeapObject* object; 3530 p->ClearSweptConservatively();
2354 3531
2355 for (Address current = p->ObjectAreaStart(); 3532 if (p->IsEvacuationCandidate()) {
2356 current < p->AllocationTop(); 3533 ASSERT(evacuation_candidates_.length() > 0);
2357 current += object->Size()) { 3534 continue;
2358 object = HeapObject::FromAddress(current); 3535 }
2359 if (object->IsMarked()) { 3536
2360 object->ClearMark(); 3537 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
2361 heap->mark_compact_collector()->tracer()->decrement_marked_count(); 3538 // Will be processed in EvacuateNewSpaceAndCandidates.
2362 3539 continue;
2363 if (!is_previous_alive) { // Transition from free to live. 3540 }
2364 space->DeallocateBlock(free_start, 3541
2365 static_cast<int>(current - free_start), 3542 if (lazy_sweeping_active) {
2366 true); 3543 if (FLAG_gc_verbose) {
2367 is_previous_alive = true; 3544 PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
3545 reinterpret_cast<intptr_t>(p));
3546 }
3547 continue;
3548 }
3549
 3550 // One unused page is kept; all further unused pages are released rather than swept.
3551 if (p->LiveBytes() == 0) {
3552 if (unused_page_present) {
3553 if (FLAG_gc_verbose) {
3554 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
3555 reinterpret_cast<intptr_t>(p));
2368 } 3556 }
2369 } else { 3557 space->ReleasePage(p);
2370 heap->mark_compact_collector()->ReportDeleteIfNeeded( 3558 continue;
2371 object, heap->isolate()); 3559 }
2372 if (is_previous_alive) { // Transition from live to free. 3560 unused_page_present = true;
2373 free_start = current; 3561 }
2374 is_previous_alive = false; 3562
3563 if (FLAG_gc_verbose) {
3564 PrintF("Sweeping 0x%" V8PRIxPTR " with sweeper %d.\n",
3565 reinterpret_cast<intptr_t>(p),
3566 sweeper);
3567 }
3568
3569 switch (sweeper) {
3570 case CONSERVATIVE: {
3571 SweepConservatively(space, p);
3572 break;
3573 }
3574 case LAZY_CONSERVATIVE: {
3575 freed_bytes += SweepConservatively(space, p);
3576 if (freed_bytes >= newspace_size && p != space->LastPage()) {
3577 space->SetPagesToSweep(p->next_page(), space->anchor());
3578 lazy_sweeping_active = true;
2375 } 3579 }
2376 LiveObjectList::ProcessNonLive(object); 3580 break;
2377 } 3581 }
2378 // The object is now unmarked for the call to Size() at the top of the 3582 case PRECISE: {
2379 // loop. 3583 if (space->identity() == CODE_SPACE) {
2380 } 3584 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
2381 3585 } else {
2382 bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop()) 3586 SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
2383 || (!is_previous_alive && free_start == p->ObjectAreaStart());
2384
2385 if (page_is_empty) {
2386 // This page is empty. Check whether we are in the middle of
 2387 // a sequence of empty pages and start one if not.
2388 if (!first_empty_page->is_valid()) {
2389 first_empty_page = p;
2390 prec_first_empty_page = prev;
2391 }
2392
2393 if (!is_previous_alive) {
2394 // There are dead objects on this page. Update space accounting stats
2395 // without putting anything into free list.
2396 int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
2397 if (size_in_bytes > 0) {
2398 space->DeallocateBlock(free_start, size_in_bytes, false);
2399 } 3587 }
2400 } 3588 break;
2401 } else { 3589 }
2402 // This page is not empty. Sequence of empty pages ended on the previous 3590 default: {
2403 // one. 3591 UNREACHABLE();
2404 if (first_empty_page->is_valid()) { 3592 }
2405 space->FreePages(prec_first_empty_page, prev); 3593 }
2406 prec_first_empty_page = first_empty_page = Page::FromAddress(NULL); 3594 }
2407 } 3595
2408 3596 // Give pages that are queued to be freed back to the OS.
2409 // If there is a free ending area on one of the previous pages we have 3597 heap()->FreeQueuedChunks();
 2410 // to deallocate that area and put it on the free list. 3598 }
2411 if (last_free_size > 0) {
2412 Page::FromAddress(last_free_start)->
2413 SetAllocationWatermark(last_free_start);
2414 space->DeallocateBlock(last_free_start, last_free_size, true);
2415 last_free_start = NULL;
2416 last_free_size = 0;
2417 }
2418
2419 // If the last region of this page was not live we remember it.
2420 if (!is_previous_alive) {
2421 ASSERT(last_free_size == 0);
2422 last_free_size = static_cast<int>(p->AllocationTop() - free_start);
2423 last_free_start = free_start;
2424 }
2425 }
2426
2427 prev = p;
2428 }
2429
2430 // We reached end of space. See if we need to adjust allocation top.
2431 Address new_allocation_top = NULL;
2432
2433 if (first_empty_page->is_valid()) {
2434 // Last used pages in space are empty. We can move allocation top backwards
2435 // to the beginning of first empty page.
2436 ASSERT(prev == space->AllocationTopPage());
2437
2438 new_allocation_top = first_empty_page->ObjectAreaStart();
2439 }
2440
2441 if (last_free_size > 0) {
2442 // There was a free ending area on the previous page.
2443 // Deallocate it without putting it into freelist and move allocation
2444 // top to the beginning of this free area.
2445 space->DeallocateBlock(last_free_start, last_free_size, false);
2446 new_allocation_top = last_free_start;
2447 }
2448
2449 if (new_allocation_top != NULL) {
2450 #ifdef DEBUG
2451 Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
2452 if (!first_empty_page->is_valid()) {
2453 ASSERT(new_allocation_top_page == space->AllocationTopPage());
2454 } else if (last_free_size > 0) {
2455 ASSERT(new_allocation_top_page == prec_first_empty_page);
2456 } else {
2457 ASSERT(new_allocation_top_page == first_empty_page);
2458 }
2459 #endif
2460
2461 space->SetTop(new_allocation_top);
2462 }
2463 }
2464
2465
2466 void MarkCompactCollector::EncodeForwardingAddresses() {
2467 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
2468 // Objects in the active semispace of the young generation may be
2469 // relocated to the inactive semispace (if not promoted). Set the
2470 // relocation info to the beginning of the inactive semispace.
2471 heap()->new_space()->MCResetRelocationInfo();
2472
2473 // Compute the forwarding pointers in each space.
2474 EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
2475 ReportDeleteIfNeeded>(
2476 heap()->old_pointer_space());
2477
2478 EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
2479 IgnoreNonLiveObject>(
2480 heap()->old_data_space());
2481
2482 EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
2483 ReportDeleteIfNeeded>(
2484 heap()->code_space());
2485
2486 EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
2487 IgnoreNonLiveObject>(
2488 heap()->cell_space());
2489
2490
2491 // Compute new space next to last after the old and code spaces have been
2492 // compacted. Objects in new space can be promoted to old or code space.
2493 EncodeForwardingAddressesInNewSpace();
2494
2495 // Compute map space last because computing forwarding addresses
2496 // overwrites non-live objects. Objects in the other spaces rely on
2497 // non-live map pointers to get the sizes of non-live objects.
2498 EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
2499 IgnoreNonLiveObject>(
2500 heap()->map_space());
2501
2502 // Write relocation info to the top page, so we can use it later. This is
2503 // done after promoting objects from the new space so we get the correct
2504 // allocation top.
2505 heap()->old_pointer_space()->MCWriteRelocationInfoToPage();
2506 heap()->old_data_space()->MCWriteRelocationInfoToPage();
2507 heap()->code_space()->MCWriteRelocationInfoToPage();
2508 heap()->map_space()->MCWriteRelocationInfoToPage();
2509 heap()->cell_space()->MCWriteRelocationInfoToPage();
2510 }
2511
2512
2513 class MapIterator : public HeapObjectIterator {
2514 public:
2515 explicit MapIterator(Heap* heap)
2516 : HeapObjectIterator(heap->map_space(), &SizeCallback) { }
2517
2518 MapIterator(Heap* heap, Address start)
2519 : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { }
2520
2521 private:
2522 static int SizeCallback(HeapObject* unused) {
2523 USE(unused);
2524 return Map::kSize;
2525 }
2526 };
2527
2528
2529 class MapCompact {
2530 public:
2531 explicit MapCompact(Heap* heap, int live_maps)
2532 : heap_(heap),
2533 live_maps_(live_maps),
2534 to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)),
2535 vacant_map_it_(heap),
2536 map_to_evacuate_it_(heap, to_evacuate_start_),
2537 first_map_to_evacuate_(
2538 reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
2539 }
2540
2541 void CompactMaps() {
2542 // As we know the number of maps to evacuate beforehand,
 2543 // we stop when there are no more vacant maps.
2544 for (Map* next_vacant_map = NextVacantMap();
2545 next_vacant_map;
2546 next_vacant_map = NextVacantMap()) {
2547 EvacuateMap(next_vacant_map, NextMapToEvacuate());
2548 }
2549
2550 #ifdef DEBUG
2551 CheckNoMapsToEvacuate();
2552 #endif
2553 }
2554
2555 void UpdateMapPointersInRoots() {
2556 MapUpdatingVisitor map_updating_visitor;
2557 heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
2558 heap()->isolate()->global_handles()->IterateWeakRoots(
2559 &map_updating_visitor);
2560 LiveObjectList::IterateElements(&map_updating_visitor);
2561 }
2562
2563 void UpdateMapPointersInPagedSpace(PagedSpace* space) {
2564 ASSERT(space != heap()->map_space());
2565
2566 PageIterator it(space, PageIterator::PAGES_IN_USE);
2567 while (it.has_next()) {
2568 Page* p = it.next();
2569 UpdateMapPointersInRange(heap(),
2570 p->ObjectAreaStart(),
2571 p->AllocationTop());
2572 }
2573 }
2574
2575 void UpdateMapPointersInNewSpace() {
2576 NewSpace* space = heap()->new_space();
2577 UpdateMapPointersInRange(heap(), space->bottom(), space->top());
2578 }
2579
2580 void UpdateMapPointersInLargeObjectSpace() {
2581 LargeObjectIterator it(heap()->lo_space());
2582 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
2583 UpdateMapPointersInObject(heap(), obj);
2584 }
2585
2586 void Finish() {
2587 heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
2588 }
2589
2590 inline Heap* heap() const { return heap_; }
2591
2592 private:
2593 Heap* heap_;
2594 int live_maps_;
2595 Address to_evacuate_start_;
2596 MapIterator vacant_map_it_;
2597 MapIterator map_to_evacuate_it_;
2598 Map* first_map_to_evacuate_;
2599
2600 // Helper class for updating map pointers in HeapObjects.
2601 class MapUpdatingVisitor: public ObjectVisitor {
2602 public:
2603 MapUpdatingVisitor() {}
2604
2605 void VisitPointer(Object** p) {
2606 UpdateMapPointer(p);
2607 }
2608
2609 void VisitPointers(Object** start, Object** end) {
2610 for (Object** p = start; p < end; p++) UpdateMapPointer(p);
2611 }
2612
2613 private:
2614 void UpdateMapPointer(Object** p) {
2615 if (!(*p)->IsHeapObject()) return;
2616 HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
2617
2618 // Moved maps are tagged with overflowed map word. They are the only
 2619 // objects whose map word is overflowed, as marking is already complete.
2620 MapWord map_word = old_map->map_word();
2621 if (!map_word.IsOverflowed()) return;
2622
2623 *p = GetForwardedMap(map_word);
2624 }
2625 };
2626
2627 static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
2628 while (true) {
2629 HeapObject* next = it->next();
2630 ASSERT(next != NULL);
2631 if (next == last)
2632 return NULL;
2633 ASSERT(!next->IsOverflowed());
2634 ASSERT(!next->IsMarked());
2635 ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
2636 if (next->IsMap() == live)
2637 return reinterpret_cast<Map*>(next);
2638 }
2639 }
2640
2641 Map* NextVacantMap() {
2642 Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
2643 ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
2644 return map;
2645 }
2646
2647 Map* NextMapToEvacuate() {
2648 Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
2649 ASSERT(map != NULL);
2650 ASSERT(map->IsMap());
2651 return map;
2652 }
2653
2654 static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
2655 ASSERT(FreeListNode::IsFreeListNode(vacant_map));
2656 ASSERT(map_to_evacuate->IsMap());
2657
2658 ASSERT(Map::kSize % 4 == 0);
2659
2660 map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(
2661 vacant_map->address(), map_to_evacuate->address(), Map::kSize);
2662
2663 ASSERT(vacant_map->IsMap()); // Due to memcpy above.
2664
2665 MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
2666 forwarding_map_word.SetOverflow();
2667 map_to_evacuate->set_map_word(forwarding_map_word);
2668
2669 ASSERT(map_to_evacuate->map_word().IsOverflowed());
2670 ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
2671 }
2672
2673 static Map* GetForwardedMap(MapWord map_word) {
2674 ASSERT(map_word.IsOverflowed());
2675 map_word.ClearOverflow();
2676 Map* new_map = map_word.ToMap();
2677 ASSERT_MAP_ALIGNED(new_map->address());
2678 return new_map;
2679 }
2680
2681 static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) {
2682 ASSERT(!obj->IsMarked());
2683 Map* map = obj->map();
2684 ASSERT(heap->map_space()->Contains(map));
2685 MapWord map_word = map->map_word();
2686 ASSERT(!map_word.IsMarked());
2687 if (map_word.IsOverflowed()) {
2688 Map* new_map = GetForwardedMap(map_word);
2689 ASSERT(heap->map_space()->Contains(new_map));
2690 obj->set_map(new_map);
2691
2692 #ifdef DEBUG
2693 if (FLAG_gc_verbose) {
2694 PrintF("update %p : %p -> %p\n",
2695 obj->address(),
2696 reinterpret_cast<void*>(map),
2697 reinterpret_cast<void*>(new_map));
2698 }
2699 #endif
2700 }
2701
2702 int size = obj->SizeFromMap(map);
2703 MapUpdatingVisitor map_updating_visitor;
2704 obj->IterateBody(map->instance_type(), size, &map_updating_visitor);
2705 return size;
2706 }
2707
2708 static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) {
2709 HeapObject* object;
2710 int size;
2711 for (Address current = start; current < end; current += size) {
2712 object = HeapObject::FromAddress(current);
2713 size = UpdateMapPointersInObject(heap, object);
2714 ASSERT(size > 0);
2715 }
2716 }
2717
2718 #ifdef DEBUG
2719 void CheckNoMapsToEvacuate() {
2720 if (!FLAG_enable_slow_asserts)
2721 return;
2722
2723 for (HeapObject* obj = map_to_evacuate_it_.next();
2724 obj != NULL; obj = map_to_evacuate_it_.next())
2725 ASSERT(FreeListNode::IsFreeListNode(obj));
2726 }
2727 #endif
2728 };
2729 3599
2730 3600
2731 void MarkCompactCollector::SweepSpaces() { 3601 void MarkCompactCollector::SweepSpaces() {
2732 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); 3602 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
2733 3603 #ifdef DEBUG
2734 ASSERT(state_ == SWEEP_SPACES); 3604 state_ = SWEEP_SPACES;
2735 ASSERT(!IsCompacting()); 3605 #endif
3606 SweeperType how_to_sweep =
3607 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
3608 if (sweep_precisely_) how_to_sweep = PRECISE;
2736 // Noncompacting collections simply sweep the spaces to clear the mark 3609 // Noncompacting collections simply sweep the spaces to clear the mark
2737 // bits and free the nonlive blocks (for old and map spaces). We sweep 3610 // bits and free the nonlive blocks (for old and map spaces). We sweep
2738 // the map space last because freeing non-live maps overwrites them and 3611 // the map space last because freeing non-live maps overwrites them and
2739 // the other spaces rely on possibly non-live maps to get the sizes for 3612 // the other spaces rely on possibly non-live maps to get the sizes for
2740 // non-live objects. 3613 // non-live objects.
2741 SweepSpace(heap(), heap()->old_pointer_space()); 3614 SweepSpace(heap()->old_pointer_space(), how_to_sweep);
2742 SweepSpace(heap(), heap()->old_data_space()); 3615 SweepSpace(heap()->old_data_space(), how_to_sweep);
2743 SweepSpace(heap(), heap()->code_space()); 3616
2744 SweepSpace(heap(), heap()->cell_space()); 3617 RemoveDeadInvalidatedCode();
3618 SweepSpace(heap()->code_space(), PRECISE);
3619
3620 SweepSpace(heap()->cell_space(), PRECISE);
3621
2745 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); 3622 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
2746 SweepNewSpace(heap(), heap()->new_space()); 3623 EvacuateNewSpaceAndCandidates();
2747 } 3624 }
2748 SweepSpace(heap(), heap()->map_space()); 3625
2749 3626 // ClearNonLiveTransitions depends on precise sweeping of map space to
2750 heap()->IterateDirtyRegions(heap()->map_space(), 3627 // detect whether unmarked map became dead in this collection or in one
2751 &heap()->IteratePointersInDirtyMapsRegion, 3628 // of the previous ones.
2752 &UpdatePointerToNewGen, 3629 SweepSpace(heap()->map_space(), PRECISE);
2753 heap()->WATERMARK_SHOULD_BE_VALID); 3630
2754 3631 ASSERT(live_map_objects_size_ <= heap()->map_space()->Size());
2755 intptr_t live_maps_size = heap()->map_space()->Size(); 3632
2756 int live_maps = static_cast<int>(live_maps_size / Map::kSize); 3633 // Deallocate unmarked objects and clear marked bits for marked objects.
2757 ASSERT(live_map_objects_size_ == live_maps_size); 3634 heap_->lo_space()->FreeUnmarkedObjects();
2758
2759 if (heap()->map_space()->NeedsCompaction(live_maps)) {
2760 MapCompact map_compact(heap(), live_maps);
2761
2762 map_compact.CompactMaps();
2763 map_compact.UpdateMapPointersInRoots();
2764
2765 PagedSpaces spaces;
2766 for (PagedSpace* space = spaces.next();
2767 space != NULL; space = spaces.next()) {
2768 if (space == heap()->map_space()) continue;
2769 map_compact.UpdateMapPointersInPagedSpace(space);
2770 }
2771 map_compact.UpdateMapPointersInNewSpace();
2772 map_compact.UpdateMapPointersInLargeObjectSpace();
2773
2774 map_compact.Finish();
2775 }
2776 }
2777
2778
2779 // Iterate the live objects in a range of addresses (eg, a page or a
2780 // semispace). The live regions of the range have been linked into a list.
2781 // The first live region is [first_live_start, first_live_end), and the last
2782 // address in the range is top. The callback function is used to get the
2783 // size of each live object.
2784 int MarkCompactCollector::IterateLiveObjectsInRange(
2785 Address start,
2786 Address end,
2787 LiveObjectCallback size_func) {
2788 int live_objects_size = 0;
2789 Address current = start;
2790 while (current < end) {
2791 uint32_t encoded_map = Memory::uint32_at(current);
2792 if (encoded_map == kSingleFreeEncoding) {
2793 current += kPointerSize;
2794 } else if (encoded_map == kMultiFreeEncoding) {
2795 current += Memory::int_at(current + kIntSize);
2796 } else {
2797 int size = (this->*size_func)(HeapObject::FromAddress(current));
2798 current += size;
2799 live_objects_size += size;
2800 }
2801 }
2802 return live_objects_size;
2803 }
2804
2805
2806 int MarkCompactCollector::IterateLiveObjects(
2807 NewSpace* space, LiveObjectCallback size_f) {
2808 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
2809 return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
2810 }
2811
2812
2813 int MarkCompactCollector::IterateLiveObjects(
2814 PagedSpace* space, LiveObjectCallback size_f) {
2815 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
2816 int total = 0;
2817 PageIterator it(space, PageIterator::PAGES_IN_USE);
2818 while (it.has_next()) {
2819 Page* p = it.next();
2820 total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
2821 p->AllocationTop(),
2822 size_f);
2823 }
2824 return total;
2825 }
2826
2827
2828 // -------------------------------------------------------------------------
2829 // Phase 3: Update pointers
2830
2831 // Helper class for updating pointers in HeapObjects.
2832 class UpdatingVisitor: public ObjectVisitor {
2833 public:
2834 explicit UpdatingVisitor(Heap* heap) : heap_(heap) {}
2835
2836 void VisitPointer(Object** p) {
2837 UpdatePointer(p);
2838 }
2839
2840 void VisitPointers(Object** start, Object** end) {
 2841 // Update all HeapObject pointers in [start, end).
2842 for (Object** p = start; p < end; p++) UpdatePointer(p);
2843 }
2844
2845 void VisitCodeTarget(RelocInfo* rinfo) {
2846 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2847 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2848 VisitPointer(&target);
2849 rinfo->set_target_address(
2850 reinterpret_cast<Code*>(target)->instruction_start());
2851 }
2852
2853 void VisitDebugTarget(RelocInfo* rinfo) {
2854 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2855 rinfo->IsPatchedReturnSequence()) ||
2856 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2857 rinfo->IsPatchedDebugBreakSlotSequence()));
2858 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2859 VisitPointer(&target);
2860 rinfo->set_call_address(
2861 reinterpret_cast<Code*>(target)->instruction_start());
2862 }
2863
2864 inline Heap* heap() const { return heap_; }
2865
2866 private:
2867 void UpdatePointer(Object** p) {
2868 if (!(*p)->IsHeapObject()) return;
2869
2870 HeapObject* obj = HeapObject::cast(*p);
2871 Address old_addr = obj->address();
2872 Address new_addr;
2873 ASSERT(!heap()->InFromSpace(obj));
2874
2875 if (heap()->new_space()->Contains(obj)) {
2876 Address forwarding_pointer_addr =
2877 heap()->new_space()->FromSpaceLow() +
2878 heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
2879 new_addr = Memory::Address_at(forwarding_pointer_addr);
2880
2881 #ifdef DEBUG
2882 ASSERT(heap()->old_pointer_space()->Contains(new_addr) ||
2883 heap()->old_data_space()->Contains(new_addr) ||
2884 heap()->new_space()->FromSpaceContains(new_addr) ||
2885 heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr)));
2886
2887 if (heap()->new_space()->FromSpaceContains(new_addr)) {
2888 ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
2889 heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
2890 }
2891 #endif
2892
2893 } else if (heap()->lo_space()->Contains(obj)) {
2894 // Don't move objects in the large object space.
2895 return;
2896
2897 } else {
2898 #ifdef DEBUG
2899 PagedSpaces spaces;
2900 PagedSpace* original_space = spaces.next();
2901 while (original_space != NULL) {
2902 if (original_space->Contains(obj)) break;
2903 original_space = spaces.next();
2904 }
2905 ASSERT(original_space != NULL);
2906 #endif
2907 new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
2908 ASSERT(original_space->Contains(new_addr));
2909 ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
2910 original_space->MCSpaceOffsetForAddress(old_addr));
2911 }
2912
2913 *p = HeapObject::FromAddress(new_addr);
2914
2915 #ifdef DEBUG
2916 if (FLAG_gc_verbose) {
2917 PrintF("update %p : %p -> %p\n",
2918 reinterpret_cast<Address>(p), old_addr, new_addr);
2919 }
2920 #endif
2921 }
2922
2923 Heap* heap_;
2924 };
2925
2926
2927 void MarkCompactCollector::UpdatePointers() {
2928 #ifdef DEBUG
2929 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
2930 state_ = UPDATE_POINTERS;
2931 #endif
2932 UpdatingVisitor updating_visitor(heap());
2933 heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
2934 &updating_visitor);
2935 heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
2936 heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);
2937
2938 // Update the pointer to the head of the weak list of global contexts.
2939 updating_visitor.VisitPointer(&heap()->global_contexts_list_);
2940
2941 LiveObjectList::IterateElements(&updating_visitor);
2942
2943 int live_maps_size = IterateLiveObjects(
2944 heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
2945 int live_pointer_olds_size = IterateLiveObjects(
2946 heap()->old_pointer_space(),
2947 &MarkCompactCollector::UpdatePointersInOldObject);
2948 int live_data_olds_size = IterateLiveObjects(
2949 heap()->old_data_space(),
2950 &MarkCompactCollector::UpdatePointersInOldObject);
2951 int live_codes_size = IterateLiveObjects(
2952 heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
2953 int live_cells_size = IterateLiveObjects(
2954 heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
2955 int live_news_size = IterateLiveObjects(
2956 heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);
2957
2958 // Large objects do not move, the map word can be updated directly.
2959 LargeObjectIterator it(heap()->lo_space());
2960 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
2961 UpdatePointersInNewObject(obj);
2962 }
2963
2964 USE(live_maps_size);
2965 USE(live_pointer_olds_size);
2966 USE(live_data_olds_size);
2967 USE(live_codes_size);
2968 USE(live_cells_size);
2969 USE(live_news_size);
2970 ASSERT(live_maps_size == live_map_objects_size_);
2971 ASSERT(live_data_olds_size == live_old_data_objects_size_);
2972 ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
2973 ASSERT(live_codes_size == live_code_objects_size_);
2974 ASSERT(live_cells_size == live_cell_objects_size_);
2975 ASSERT(live_news_size == live_young_objects_size_);
2976 }
2977
2978
2979 int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
2980 // Keep old map pointers
2981 Map* old_map = obj->map();
2982 ASSERT(old_map->IsHeapObject());
2983
2984 Address forwarded = GetForwardingAddressInOldSpace(old_map);
2985
2986 ASSERT(heap()->map_space()->Contains(old_map));
2987 ASSERT(heap()->map_space()->Contains(forwarded));
2988 #ifdef DEBUG
2989 if (FLAG_gc_verbose) {
2990 PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
2991 forwarded);
2992 }
2993 #endif
2994 // Update the map pointer.
2995 obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));
2996
2997 // We have to compute the object size relying on the old map because
2998 // map objects are not relocated yet.
2999 int obj_size = obj->SizeFromMap(old_map);
3000
3001 // Update pointers in the object body.
3002 UpdatingVisitor updating_visitor(heap());
3003 obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
3004 return obj_size;
3005 }
3006
3007
3008 int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
3009 // Decode the map pointer.
3010 MapWord encoding = obj->map_word();
3011 Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
3012 ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
3013
 3014 // At this point, the first word of map_addr is also encoded, so we cannot
3015 // cast it to Map* using Map::cast.
3016 Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
3017 int obj_size = obj->SizeFromMap(map);
3018 InstanceType type = map->instance_type();
3019
3020 // Update map pointer.
3021 Address new_map_addr = GetForwardingAddressInOldSpace(map);
3022 int offset = encoding.DecodeOffset();
3023 obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));
3024
3025 #ifdef DEBUG
3026 if (FLAG_gc_verbose) {
3027 PrintF("update %p : %p -> %p\n", obj->address(),
3028 map_addr, new_map_addr);
3029 }
3030 #endif
3031
3032 // Update pointers in the object body.
3033 UpdatingVisitor updating_visitor(heap());
3034 obj->IterateBody(type, obj_size, &updating_visitor);
3035 return obj_size;
3036 }
3037
3038
3039 Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
 3040 // Object should be either in old or map space.
3041 MapWord encoding = obj->map_word();
3042
3043 // Offset to the first live object's forwarding address.
3044 int offset = encoding.DecodeOffset();
3045 Address obj_addr = obj->address();
3046
3047 // Find the first live object's forwarding address.
3048 Page* p = Page::FromAddress(obj_addr);
3049 Address first_forwarded = p->mc_first_forwarded;
3050
3051 // Page start address of forwarded address.
3052 Page* forwarded_page = Page::FromAddress(first_forwarded);
3053 int forwarded_offset = forwarded_page->Offset(first_forwarded);
3054
3055 // Find end of allocation in the page of first_forwarded.
3056 int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
3057
 3058 // Check if the current object's forwarding pointer is in the same page
 3059 // as the first live object's forwarding pointer.
3060 if (forwarded_offset + offset < mc_top_offset) {
3061 // In the same page.
3062 return first_forwarded + offset;
3063 }
3064
3065 // Must be in the next page, NOTE: this may cross chunks.
3066 Page* next_page = forwarded_page->next_page();
3067 ASSERT(next_page->is_valid());
3068
3069 offset -= (mc_top_offset - forwarded_offset);
3070 offset += Page::kObjectStartOffset;
3071
3072 ASSERT_PAGE_OFFSET(offset);
3073 ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
3074
3075 return next_page->OffsetToAddress(offset);
3076 }
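To make the page-crossing arithmetic in GetForwardingAddressInOldSpace concrete, here is a hedged numeric sketch; all values are invented and Page::kObjectStartOffset is replaced by a placeholder constant.

// Standalone sketch of the spill-over case above, with made-up numbers.
#include <cassert>

int main() {
  const int kObjectStartOffsetStandIn = 256;  // placeholder, not the real constant
  int forwarded_offset = 1000;  // offset of first_forwarded within its page
  int mc_top_offset = 1800;     // allocation watermark offset of that page
  int offset = 1200;            // encoded offset of the object being looked up
  // 1000 + 1200 >= 1800, so the forwarding address lands on the next page.
  assert(forwarded_offset + offset >= mc_top_offset);
  offset -= (mc_top_offset - forwarded_offset);  // 400 bytes past the next page start
  offset += kObjectStartOffsetStandIn;
  assert(offset == kObjectStartOffsetStandIn + 400);
  return 0;
}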
3077
3078
3079 // -------------------------------------------------------------------------
3080 // Phase 4: Relocate objects
3081
3082 void MarkCompactCollector::RelocateObjects() {
3083 #ifdef DEBUG
3084 ASSERT(state_ == UPDATE_POINTERS);
3085 state_ = RELOCATE_OBJECTS;
3086 #endif
 3087 // Relocate objects; always relocate map objects first. Relocating
 3088 // objects in other spaces relies on map objects to get object sizes.
3089 int live_maps_size = IterateLiveObjects(
3090 heap()->map_space(), &MarkCompactCollector::RelocateMapObject);
3091 int live_pointer_olds_size = IterateLiveObjects(
3092 heap()->old_pointer_space(),
3093 &MarkCompactCollector::RelocateOldPointerObject);
3094 int live_data_olds_size = IterateLiveObjects(
3095 heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
3096 int live_codes_size = IterateLiveObjects(
3097 heap()->code_space(), &MarkCompactCollector::RelocateCodeObject);
3098 int live_cells_size = IterateLiveObjects(
3099 heap()->cell_space(), &MarkCompactCollector::RelocateCellObject);
3100 int live_news_size = IterateLiveObjects(
3101 heap()->new_space(), &MarkCompactCollector::RelocateNewObject);
3102
3103 USE(live_maps_size);
3104 USE(live_pointer_olds_size);
3105 USE(live_data_olds_size);
3106 USE(live_codes_size);
3107 USE(live_cells_size);
3108 USE(live_news_size);
3109 ASSERT(live_maps_size == live_map_objects_size_);
3110 ASSERT(live_data_olds_size == live_old_data_objects_size_);
3111 ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
3112 ASSERT(live_codes_size == live_code_objects_size_);
3113 ASSERT(live_cells_size == live_cell_objects_size_);
3114 ASSERT(live_news_size == live_young_objects_size_);
3115
3116 // Flip from and to spaces
3117 heap()->new_space()->Flip();
3118
3119 heap()->new_space()->MCCommitRelocationInfo();
3120
3121 // Set age_mark to bottom in to space
3122 Address mark = heap()->new_space()->bottom();
3123 heap()->new_space()->set_age_mark(mark);
3124
3125 PagedSpaces spaces;
3126 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
3127 space->MCCommitRelocationInfo();
3128
3129 heap()->CheckNewSpaceExpansionCriteria();
3130 heap()->IncrementYoungSurvivorsCounter(live_news_size);
3131 }
3132
3133
3134 int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
3135 // Recover map pointer.
3136 MapWord encoding = obj->map_word();
3137 Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
3138 ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
3139
3140 // Get forwarding address before resetting map pointer
3141 Address new_addr = GetForwardingAddressInOldSpace(obj);
3142
3143 // Reset map pointer. The meta map object may not be copied yet so
3144 // Map::cast does not yet work.
3145 obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
3146
3147 Address old_addr = obj->address();
3148
3149 if (new_addr != old_addr) {
3150 // Move contents.
3151 heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
3152 old_addr,
3153 Map::kSize);
3154 }
3155
3156 #ifdef DEBUG
3157 if (FLAG_gc_verbose) {
3158 PrintF("relocate %p -> %p\n", old_addr, new_addr);
3159 }
3160 #endif
3161
3162 return Map::kSize;
3163 }
3164
3165
3166 static inline int RestoreMap(HeapObject* obj,
3167 PagedSpace* space,
3168 Address new_addr,
3169 Address map_addr) {
3170 // This must be a non-map object, and the function relies on the
3171 // assumption that the Map space is compacted before the other paged
3172 // spaces (see RelocateObjects).
3173
3174 // Reset map pointer.
3175 obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));
3176
3177 int obj_size = obj->Size();
3178 ASSERT_OBJECT_SIZE(obj_size);
3179
3180 ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
3181 space->MCSpaceOffsetForAddress(obj->address()));
3182
3183 #ifdef DEBUG
3184 if (FLAG_gc_verbose) {
3185 PrintF("relocate %p -> %p\n", obj->address(), new_addr);
3186 }
3187 #endif
3188
3189 return obj_size;
3190 }
3191
3192
3193 int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
3194 PagedSpace* space) {
3195 // Recover map pointer.
3196 MapWord encoding = obj->map_word();
3197 Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
3198 ASSERT(heap()->map_space()->Contains(map_addr));
3199
3200 // Get forwarding address before resetting map pointer.
3201 Address new_addr = GetForwardingAddressInOldSpace(obj);
3202
3203 // Reset the map pointer.
3204 int obj_size = RestoreMap(obj, space, new_addr, map_addr);
3205
3206 Address old_addr = obj->address();
3207
3208 if (new_addr != old_addr) {
3209 // Move contents.
3210 if (space == heap()->old_data_space()) {
3211 heap()->MoveBlock(new_addr, old_addr, obj_size);
3212 } else {
3213 heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
3214 old_addr,
3215 obj_size);
3216 }
3217 }
3218
3219 ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
3220
3221 HeapObject* copied_to = HeapObject::FromAddress(new_addr);
3222 if (copied_to->IsSharedFunctionInfo()) {
3223 PROFILE(heap()->isolate(),
3224 SharedFunctionInfoMoveEvent(old_addr, new_addr));
3225 }
3226 HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
3227
3228 return obj_size;
3229 }
3230
3231
3232 int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
3233 return RelocateOldNonCodeObject(obj, heap()->old_pointer_space());
3234 }
3235
3236
3237 int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
3238 return RelocateOldNonCodeObject(obj, heap()->old_data_space());
3239 }
3240
3241
3242 int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
3243 return RelocateOldNonCodeObject(obj, heap()->cell_space());
3244 }
3245
3246
3247 int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
3248 // Recover map pointer.
3249 MapWord encoding = obj->map_word();
3250 Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
3251 ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
3252
3253 // Get forwarding address before resetting map pointer.
3254 Address new_addr = GetForwardingAddressInOldSpace(obj);
3255
3256 // Reset the map pointer.
3257 int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr);
3258
3259 Address old_addr = obj->address();
3260
3261 if (new_addr != old_addr) {
3262 // Move contents.
3263 heap()->MoveBlock(new_addr, old_addr, obj_size);
3264 }
3265
3266 HeapObject* copied_to = HeapObject::FromAddress(new_addr);
3267 if (copied_to->IsCode()) {
3268 // May also update inline cache target.
3269 Code::cast(copied_to)->Relocate(new_addr - old_addr);
3270 // Notify the logger that compiled code has moved.
3271 PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr));
3272 }
3273 HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
3274
3275 return obj_size;
3276 }
3277
3278
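// The forwarding address of a surviving new-space object is stored in
// from-space at the same offset the object occupies in to-space. The
// destination is either still inside new space or, for promoted objects,
// in old pointer space or old data space (see the DEBUG check below).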
3279 int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
3280 int obj_size = obj->Size();
3281
3282 // Get forwarding address.
3283 Address old_addr = obj->address();
3284 int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
3285
3286 Address new_addr =
3287 Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset);
3288
3289 #ifdef DEBUG
3290 if (heap()->new_space()->FromSpaceContains(new_addr)) {
3291 ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
3292 heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
3293 } else {
3294 ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() ||
3295 heap()->TargetSpace(obj) == heap()->old_data_space());
3296 }
3297 #endif
3298
3299 // New and old addresses cannot overlap.
3300 if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) {
3301 heap()->CopyBlock(new_addr, old_addr, obj_size);
3302 } else {
3303 heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
3304 old_addr,
3305 obj_size);
3306 }
3307
3308 #ifdef DEBUG
3309 if (FLAG_gc_verbose) {
3310 PrintF("relocate %p -> %p\n", old_addr, new_addr);
3311 }
3312 #endif
3313
3314 HeapObject* copied_to = HeapObject::FromAddress(new_addr);
3315 if (copied_to->IsSharedFunctionInfo()) {
3316 PROFILE(heap()->isolate(),
3317 SharedFunctionInfoMoveEvent(old_addr, new_addr));
3318 }
3319 HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
3320
3321 return obj_size;
3322 } 3635 }
3323 3636
3324 3637
3325 void MarkCompactCollector::EnableCodeFlushing(bool enable) { 3638 void MarkCompactCollector::EnableCodeFlushing(bool enable) {
3326 if (enable) { 3639 if (enable) {
3327 if (code_flusher_ != NULL) return; 3640 if (code_flusher_ != NULL) return;
3328 code_flusher_ = new CodeFlusher(heap()->isolate()); 3641 code_flusher_ = new CodeFlusher(heap()->isolate());
3329 } else { 3642 } else {
3330 if (code_flusher_ == NULL) return; 3643 if (code_flusher_ == NULL) return;
3331 delete code_flusher_; 3644 delete code_flusher_;
3332 code_flusher_ = NULL; 3645 code_flusher_ = NULL;
3333 } 3646 }
3334 } 3647 }
3335 3648
3336 3649
3650 // TODO(1466) ReportDeleteIfNeeded is not called currently.
3651 // Our profiling tools do not expect intersections between
3652 // code objects. We should either reenable it or change our tools.
3337 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj, 3653 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
3338 Isolate* isolate) { 3654 Isolate* isolate) {
3339 #ifdef ENABLE_GDB_JIT_INTERFACE 3655 #ifdef ENABLE_GDB_JIT_INTERFACE
3340 if (obj->IsCode()) { 3656 if (obj->IsCode()) {
3341 GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj)); 3657 GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
3342 } 3658 }
3343 #endif 3659 #endif
3344 if (obj->IsCode()) { 3660 if (obj->IsCode()) {
3345 PROFILE(isolate, CodeDeleteEvent(obj->address())); 3661 PROFILE(isolate, CodeDeleteEvent(obj->address()));
3346 } 3662 }
3347 } 3663 }
3348 3664
3349 3665
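// While an object is marked, the mark bit lives in its map word, so the raw
// map word cannot be used directly; clear the mark first and then compute
// the size from the real map.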
3350 int MarkCompactCollector::SizeOfMarkedObject(HeapObject* obj) {
3351 MapWord map_word = obj->map_word();
3352 map_word.ClearMark();
3353 return obj->SizeFromMap(map_word.ToMap());
3354 }
3355
3356
3357 void MarkCompactCollector::Initialize() { 3666 void MarkCompactCollector::Initialize() {
3358 StaticPointersToNewGenUpdatingVisitor::Initialize();
3359 StaticMarkingVisitor::Initialize(); 3667 StaticMarkingVisitor::Initialize();
3360 } 3668 }
3361 3669
3362 3670
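// A slots buffer holds two kinds of entries: an untyped entry is simply the
// address of an object slot, while a typed entry occupies two consecutive
// positions, a small integer type marker followed by an address. The marker
// can be told apart from a genuine slot pointer because its value is below
// NUMBER_OF_SLOT_TYPES, which no real slot address can be.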
3671 bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
3672 return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
3673 }
3674
3675
3676 bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
3677 SlotsBuffer** buffer_address,
3678 SlotType type,
3679 Address addr,
3680 AdditionMode mode) {
3681 SlotsBuffer* buffer = *buffer_address;
3682 if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
3683 if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
3684 allocator->DeallocateChain(buffer_address);
3685 return false;
3686 }
3687 buffer = allocator->AllocateBuffer(buffer);
3688 *buffer_address = buffer;
3689 }
3690 ASSERT(buffer->HasSpaceForTypedSlot());
3691 buffer->Add(reinterpret_cast<ObjectSlot>(type));
3692 buffer->Add(reinterpret_cast<ObjectSlot>(addr));
3693 return true;
3694 }
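// If the current buffer is full, a fresh one is linked in front of the
// chain; under FAIL_ON_OVERFLOW the whole chain is instead thrown away once
// it grows past the threshold and false is returned, leaving the caller to
// fall back to another strategy. A sketch of a call site, mirroring
// RecordCodeEntrySlot further down (`page` and `slot_addr` are placeholder
// names, not identifiers from this file):
//
//   if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
//                           page->slots_buffer_address(),
//                           SlotsBuffer::CODE_ENTRY_SLOT,
//                           slot_addr,
//                           SlotsBuffer::FAIL_ON_OVERFLOW)) {
//     EvictEvacuationCandidate(page);
//   }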
3695
3696
3697 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
3698 if (RelocInfo::IsCodeTarget(rmode)) {
3699 return SlotsBuffer::CODE_TARGET_SLOT;
3700 } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
3701 return SlotsBuffer::DEBUG_TARGET_SLOT;
3702 } else if (RelocInfo::IsJSReturn(rmode)) {
3703 return SlotsBuffer::JS_RETURN_SLOT;
3704 }
3705 UNREACHABLE();
3706 return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
3707 }
3708
3709
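// Remembers the pc of a relocation entry that refers to a code target on an
// evacuation candidate page, unless slot recording is skipped for the host
// code object. If the slots buffer chain refuses to grow, the page is
// dropped from the candidate set instead, so its slots no longer need to be
// tracked.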
3710 void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Code* target) {
3711 Page* target_page = Page::FromAddress(
3712 reinterpret_cast<Address>(target));
3713 if (target_page->IsEvacuationCandidate() &&
3714 (rinfo->host() == NULL ||
3715 !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
3716 if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
3717 target_page->slots_buffer_address(),
3718 SlotTypeForRMode(rinfo->rmode()),
3719 rinfo->pc(),
3720 SlotsBuffer::FAIL_ON_OVERFLOW)) {
3721 EvictEvacuationCandidate(target_page);
3722 }
3723 }
3724 }
3725
3726
3727 void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
3728 Page* target_page = Page::FromAddress(
3729 reinterpret_cast<Address>(target));
3730 if (target_page->IsEvacuationCandidate() &&
3731 !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
3732 if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
3733 target_page->slots_buffer_address(),
3734 SlotsBuffer::CODE_ENTRY_SLOT,
3735 slot,
3736 SlotsBuffer::FAIL_ON_OVERFLOW)) {
3737 EvictEvacuationCandidate(target_page);
3738 }
3739 }
3740 }
3741
3742
3743 static inline SlotsBuffer::SlotType DecodeSlotType(
3744 SlotsBuffer::ObjectSlot slot) {
3745 return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
3746 }
3747
3748
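// Walks every recorded entry: an untyped entry is an object slot that is
// updated in place, while a typed entry also consumes the following position
// (the type marker is followed by the address or pc it applies to).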
3749 void SlotsBuffer::UpdateSlots(Heap* heap) {
3750 PointersUpdatingVisitor v(heap);
3751
3752 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
3753 ObjectSlot slot = slots_[slot_idx];
3754 if (!IsTypedSlot(slot)) {
3755 PointersUpdatingVisitor::UpdateSlot(heap, slot);
3756 } else {
3757 ++slot_idx;
3758 ASSERT(slot_idx < idx_);
3759 UpdateSlot(&v,
3760 DecodeSlotType(slot),
3761 reinterpret_cast<Address>(slots_[slot_idx]));
3762 }
3763 }
3764 }
3765
3766
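// Same as UpdateSlots, except that entries lying inside invalidated code
// objects are skipped.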
3767 void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
3768 PointersUpdatingVisitor v(heap);
3769
3770 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
3771 ObjectSlot slot = slots_[slot_idx];
3772 if (!IsTypedSlot(slot)) {
3773 if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
3774 PointersUpdatingVisitor::UpdateSlot(heap, slot);
3775 }
3776 } else {
3777 ++slot_idx;
3778 ASSERT(slot_idx < idx_);
3779 Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
3780 if (!IsOnInvalidatedCodeObject(pc)) {
3781 UpdateSlot(&v,
3782 DecodeSlotType(slot),
3783 reinterpret_cast<Address>(slots_[slot_idx]));
3784 }
3785 }
3786 }
3787 }
3788
3789
3790 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
3791 return new SlotsBuffer(next_buffer);
3792 }
3793
3794
3795 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
3796 delete buffer;
3797 }
3798
3799
3800 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
3801 SlotsBuffer* buffer = *buffer_address;
3802 while (buffer != NULL) {
3803 SlotsBuffer* next_buffer = buffer->next();
3804 DeallocateBuffer(buffer);
3805 buffer = next_buffer;
3806 }
3807 *buffer_address = NULL;
3808 }
3809
3810
3363 } } // namespace v8::internal 3811 } } // namespace v8::internal