Chromium Code Reviews

Side by Side Diff: src/mark-compact.cc

Issue 7189066: Simple non-incremental compaction by evacuation. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/gc
Patch Set: Created 9 years, 6 months ago
OLD | NEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 39 matching lines...)
50 const char* Marking::kImpossibleBitPattern = "01"; 50 const char* Marking::kImpossibleBitPattern = "01";
51 51
52 52
53 // ------------------------------------------------------------------------- 53 // -------------------------------------------------------------------------
54 // MarkCompactCollector 54 // MarkCompactCollector
55 55
56 MarkCompactCollector::MarkCompactCollector() : // NOLINT 56 MarkCompactCollector::MarkCompactCollector() : // NOLINT
57 #ifdef DEBUG 57 #ifdef DEBUG
58 state_(IDLE), 58 state_(IDLE),
59 #endif 59 #endif
60 force_compaction_(false),
61 compacting_collection_(false),
62 compact_on_next_gc_(false),
63 previous_marked_count_(0),
64 tracer_(NULL), 60 tracer_(NULL),
65 #ifdef DEBUG 61 #ifdef DEBUG
66 live_young_objects_size_(0), 62 live_young_objects_size_(0),
67 live_old_pointer_objects_size_(0), 63 live_old_pointer_objects_size_(0),
68 live_old_data_objects_size_(0), 64 live_old_data_objects_size_(0),
69 live_code_objects_size_(0), 65 live_code_objects_size_(0),
70 live_map_objects_size_(0), 66 live_map_objects_size_(0),
71 live_cell_objects_size_(0), 67 live_cell_objects_size_(0),
72 live_lo_objects_size_(0), 68 live_lo_objects_size_(0),
73 live_bytes_(0), 69 live_bytes_(0),
(...skipping 18 matching lines...)
92 88
93 static void VerifyMarking(Address bottom, Address top) { 89 static void VerifyMarking(Address bottom, Address top) {
94 VerifyMarkingVisitor visitor; 90 VerifyMarkingVisitor visitor;
95 HeapObject* object; 91 HeapObject* object;
96 Address next_object_must_be_here_or_later = bottom; 92 Address next_object_must_be_here_or_later = bottom;
97 93
98 for (Address current = bottom; 94 for (Address current = bottom;
99 current < top; 95 current < top;
100 current += kPointerSize) { 96 current += kPointerSize) {
101 object = HeapObject::FromAddress(current); 97 object = HeapObject::FromAddress(current);
102 if (HEAP->mark_compact_collector()->IsMarked(object)) { 98 if (MarkCompactCollector::IsMarked(object)) {
103 ASSERT(current >= next_object_must_be_here_or_later); 99 ASSERT(current >= next_object_must_be_here_or_later);
104 object->Iterate(&visitor); 100 object->Iterate(&visitor);
105 next_object_must_be_here_or_later = current + object->Size(); 101 next_object_must_be_here_or_later = current + object->Size();
106 } 102 }
107 } 103 }
108 } 104 }
109 105
110 106
111 static void VerifyMarking(Page* p) { 107 static void VerifyMarking(Page* p) {
112 VerifyMarking(p->ObjectAreaStart(), p->ObjectAreaEnd()); 108 VerifyMarking(p->ObjectAreaStart(), p->ObjectAreaEnd());
(...skipping 18 matching lines...)
131 127
132 static void VerifyMarking(PagedSpace* space) { 128 static void VerifyMarking(PagedSpace* space) {
133 PageIterator it(space); 129 PageIterator it(space);
134 130
135 while (it.has_next()) { 131 while (it.has_next()) {
136 VerifyMarking(it.next()); 132 VerifyMarking(it.next());
137 } 133 }
138 } 134 }
139 135
140 136
141 static void VerifyMarking() { 137 static void VerifyMarking(Heap* heap) {
142 // TODO(gc) ISOLATES 138 VerifyMarking(heap->old_pointer_space());
143 VerifyMarking(HEAP->old_pointer_space()); 139 VerifyMarking(heap->old_data_space());
144 VerifyMarking(HEAP->old_data_space()); 140 VerifyMarking(heap->code_space());
145 VerifyMarking(HEAP->code_space()); 141 VerifyMarking(heap->cell_space());
146 VerifyMarking(HEAP->cell_space()); 142 VerifyMarking(heap->map_space());
147 VerifyMarking(HEAP->map_space()); 143 VerifyMarking(heap->new_space());
148 VerifyMarking(HEAP->new_space());
149 144
150 VerifyMarkingVisitor visitor; 145 VerifyMarkingVisitor visitor;
151 HEAP->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG); 146 heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
147 }
148
149
150 class VerifyEvacuationVisitor: public ObjectVisitor {
151 public:
152 void VisitPointers(Object** start, Object** end) {
153 for (Object** current = start; current < end; current++) {
154 if ((*current)->IsHeapObject()) {
155 HeapObject* object = HeapObject::cast(*current);
156 if (MarkCompactCollector::IsOnEvacuationCandidate(object)) {
157 HEAP->TracePathToObject(source_);
158 CHECK(false);
159 }
160 }
161 }
162 }
163
164 HeapObject* source_;
165 };
166
167
168 static void VerifyEvacuation(Address bottom, Address top) {
169 VerifyEvacuationVisitor visitor;
170 HeapObject* object;
171 Address next_object_must_be_here_or_later = bottom;
172
173 for (Address current = bottom;
174 current < top;
175 current += kPointerSize) {
176 object = HeapObject::FromAddress(current);
177 if (MarkCompactCollector::IsMarked(object)) {
178 ASSERT(current >= next_object_must_be_here_or_later);
179 visitor.source_ = object;
180 object->Iterate(&visitor);
181 next_object_must_be_here_or_later = current + object->Size();
182 }
183 }
184 }
185
186
187 static void VerifyEvacuation(Page* p) {
188 if (p->IsEvacuationCandidate()) return;
189
190 VerifyEvacuation(p->ObjectAreaStart(), p->ObjectAreaEnd());
191 }
192
193
194 static void VerifyEvacuation(NewSpace* space) {
195 // TODO(gc): XXX
Erik Corry 2011/06/20 20:41:26 Add text or remove XXX
Vyacheslav Egorov (Chromium) 2011/06/21 11:44:48 Done.
196 }
197
198
199 static void VerifyEvacuation(PagedSpace* space) {
200 PageIterator it(space);
201
202 while (it.has_next()) {
203 VerifyEvacuation(it.next());
204 }
205 }
206
207
208 static void VerifyEvacuation(Heap* heap) {
209 VerifyEvacuation(heap->old_pointer_space());
210 VerifyEvacuation(heap->old_data_space());
211 VerifyEvacuation(heap->code_space());
212 VerifyEvacuation(heap->cell_space());
213 VerifyEvacuation(heap->map_space());
214 VerifyEvacuation(heap->new_space());
215
216 VerifyEvacuationVisitor visitor;
217 heap->IterateStrongRoots(&visitor, VISIT_ALL);
152 } 218 }
153 #endif 219 #endif
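The new VerifyEvacuation visitors check, under DEBUG, that no marked object outside the candidate pages still holds a pointer into an evacuation candidate; a violation traces the path to the offending object and hits CHECK(false). A stand-alone sketch of that invariant with simplified stand-in types (the set-of-page-base-addresses representation and power-of-two page masking are assumptions for illustration, not V8's Page layout):

    #include <cassert>
    #include <cstdint>
    #include <set>
    #include <vector>

    typedef uintptr_t Address;

    // A page is identified by its base address; evacuation candidates are a set.
    // Assumes page_size is a power of two.
    static bool IsOnCandidatePage(Address target,
                                  const std::set<Address>& candidate_pages,
                                  Address page_size) {
      return candidate_pages.count(target & ~(page_size - 1)) != 0;
    }

    // Mirrors VerifyEvacuationVisitor: every pointer held by a live object on a
    // non-candidate page must target memory outside the candidate pages.
    static void VerifyEvacuationSketch(const std::vector<Address>& live_pointers,
                                       const std::set<Address>& candidate_pages,
                                       Address page_size) {
      for (Address target : live_pointers) {
        assert(!IsOnCandidatePage(target, candidate_pages, page_size));
      }
    }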
154 220
221
222 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
223 p->MarkEvacuationCandidate();
224 evacuation_candidates_.Add(p);
225 }
226
227
155 void MarkCompactCollector::CollectGarbage() { 228 void MarkCompactCollector::CollectGarbage() {
156 // Make sure that Prepare() has been called. The individual steps below will 229 // Make sure that Prepare() has been called. The individual steps below will
157 // update the state as they proceed. 230 // update the state as they proceed.
158 ASSERT(state_ == PREPARE_GC); 231 ASSERT(state_ == PREPARE_GC);
159 232
160 // Prepare has selected whether to compact the old generation or not.
161 // Tell the tracer.
162 if (IsCompacting()) tracer_->set_is_compacting();
163
164 MarkLiveObjects(); 233 MarkLiveObjects();
165 ASSERT(heap_->incremental_marking()->IsStopped()); 234 ASSERT(heap_->incremental_marking()->IsStopped());
166 235
167 if (FLAG_collect_maps) ClearNonLiveTransitions(); 236 if (FLAG_collect_maps) ClearNonLiveTransitions();
168 237
169 #ifdef DEBUG 238 #ifdef DEBUG
170 VerifyMarking(); 239 VerifyMarking(heap_);
171 #endif 240 #endif
172 241
173 SweepSpaces(); 242 SweepSpaces();
174 243
175 heap_->isolate()->pc_to_code_cache()->Flush(); 244 heap_->isolate()->pc_to_code_cache()->Flush();
176 245
177 Finish(); 246 Finish();
178 247
179 // Check that swept all marked objects and 248 // Null out the GC tracer.
Erik Corry 2011/06/20 20:41:26 Comment doesnt really add much. Why are we nullin
Vyacheslav Egorov (Chromium) 2011/06/21 11:44:48 Cause it's stack allocated.
180 // null out the GC tracer.
181 // TODO(gc) does not work with conservative sweeping.
182 // ASSERT(tracer_->marked_count() == 0);
183 tracer_ = NULL; 249 tracer_ = NULL;
184 } 250 }
185 251
186 252
187 #ifdef DEBUG 253 #ifdef DEBUG
188 static void VerifyMarkbitsAreClean(PagedSpace* space) { 254 static void VerifyMarkbitsAreClean(PagedSpace* space) {
189 PageIterator it(space); 255 PageIterator it(space);
190 256
191 while (it.has_next()) { 257 while (it.has_next()) {
192 Page* p = it.next(); 258 Page* p = it.next();
193 ASSERT(p->markbits()->IsClean()); 259 ASSERT(p->markbits()->IsClean());
194 } 260 }
195 } 261 }
196 262
197 static void VerifyMarkbitsAreClean(NewSpace* space) { 263 static void VerifyMarkbitsAreClean(NewSpace* space) {
198 NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd()); 264 NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
199 265
200 while (it.has_next()) { 266 while (it.has_next()) {
201 NewSpacePage* p = it.next(); 267 NewSpacePage* p = it.next();
202 ASSERT(p->markbits()->IsClean()); 268 ASSERT(p->markbits()->IsClean());
203 } 269 }
204 } 270 }
205 271
206 static void VerifyMarkbitsAreClean() { 272 static void VerifyMarkbitsAreClean(Heap* heap) {
207 VerifyMarkbitsAreClean(HEAP->old_pointer_space()); 273 VerifyMarkbitsAreClean(heap->old_pointer_space());
208 VerifyMarkbitsAreClean(HEAP->old_data_space()); 274 VerifyMarkbitsAreClean(heap->old_data_space());
209 VerifyMarkbitsAreClean(HEAP->code_space()); 275 VerifyMarkbitsAreClean(heap->code_space());
210 VerifyMarkbitsAreClean(HEAP->cell_space()); 276 VerifyMarkbitsAreClean(heap->cell_space());
211 VerifyMarkbitsAreClean(HEAP->map_space()); 277 VerifyMarkbitsAreClean(heap->map_space());
212 VerifyMarkbitsAreClean(HEAP->new_space()); 278 VerifyMarkbitsAreClean(heap->new_space());
213 } 279 }
214 #endif 280 #endif
215 281
216 282
217 static void ClearMarkbits(PagedSpace* space) { 283 static void ClearMarkbits(PagedSpace* space) {
218 PageIterator it(space); 284 PageIterator it(space);
219 285
220 while (it.has_next()) { 286 while (it.has_next()) {
221 Page* p = it.next(); 287 Page* p = it.next();
222 p->markbits()->Clear(); 288 p->markbits()->Clear();
223 } 289 }
224 } 290 }
225 291
226 292
227 static void ClearMarkbits(NewSpace* space) { 293 static void ClearMarkbits(NewSpace* space) {
228 NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd()); 294 NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
229 295
230 while (it.has_next()) { 296 while (it.has_next()) {
231 NewSpacePage* p = it.next(); 297 NewSpacePage* p = it.next();
232 p->markbits()->Clear(); 298 p->markbits()->Clear();
233 } 299 }
234 } 300 }
235 301
236 302
237 static void ClearMarkbits() { 303 static void ClearMarkbits(Heap* heap) {
238 // TODO(gc): Clean the mark bits while sweeping. 304 // TODO(gc): Clean the mark bits while sweeping.
239 Heap* heap = HEAP;
240 ClearMarkbits(heap->code_space()); 305 ClearMarkbits(heap->code_space());
241 ClearMarkbits(heap->map_space()); 306 ClearMarkbits(heap->map_space());
242 ClearMarkbits(heap->old_pointer_space()); 307 ClearMarkbits(heap->old_pointer_space());
243 ClearMarkbits(heap->old_data_space()); 308 ClearMarkbits(heap->old_data_space());
244 ClearMarkbits(heap->cell_space()); 309 ClearMarkbits(heap->cell_space());
245 ClearMarkbits(heap->new_space()); 310 ClearMarkbits(heap->new_space());
246 } 311 }
247 312
248 313
249 void Marking::TransferMark(Address old_start, Address new_start) { 314 void Marking::TransferMark(Address old_start, Address new_start) {
(...skipping 31 matching lines...)
281 MarkBit old_mark_bit = MarkBitFrom(old_start); 346 MarkBit old_mark_bit = MarkBitFrom(old_start);
282 if (!old_mark_bit.Get()) { 347 if (!old_mark_bit.Get()) {
283 return; 348 return;
284 } 349 }
285 } 350 }
286 new_mark_bit.Set(); 351 new_mark_bit.Set();
287 } 352 }
288 } 353 }
289 354
290 355
356 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
357 ASSERT(space->identity() == OLD_POINTER_SPACE ||
358 space->identity() == OLD_DATA_SPACE);
359
360 PageIterator it(space);
361 while (it.has_next()) {
362 Page* p = it.next();
363 if (space->IsFragmented(p)) {
364 AddEvacuationCandidate(p);
365 } else {
366 p->ClearEvacuationCandidate();
367 }
368 }
369 }
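CollectEvacuationCandidates delegates the per-page decision to PagedSpace::IsFragmented, whose implementation is not part of this diff. A hypothetical sketch of what such a page-level test could look like, borrowing the spirit of the space-wide 15% heuristic removed from Finish() further down (the threshold, parameters, and name below are assumptions, not V8's actual code):

    #include <cstdint>

    // Hypothetical page-level fragmentation test (not V8's IsFragmented).
    // A page becomes an evacuation candidate when the share of its object
    // area that is wasted or sitting on free lists exceeds a fixed threshold.
    static bool IsFragmentedPage(intptr_t wasted_bytes,
                                 intptr_t free_list_bytes,
                                 intptr_t object_area_size) {
      static const int kPageFragmentationLimit = 15;  // Percent; assumed.
      intptr_t recoverable = wasted_bytes + free_list_bytes;
      return recoverable * 100 > kPageFragmentationLimit * object_area_size;
    }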
370
371
372 static void ClearEvacuationCandidates(PagedSpace* space) {
373 ASSERT(space->identity() == OLD_POINTER_SPACE ||
374 space->identity() == OLD_DATA_SPACE);
375
376 PageIterator it(space);
377 while (it.has_next()) {
378 Page* p = it.next();
379 p->ClearEvacuationCandidate();
380 }
381 }
382
383
291 void MarkCompactCollector::Prepare(GCTracer* tracer) { 384 void MarkCompactCollector::Prepare(GCTracer* tracer) {
385 // TODO(gc) re-enable code flushing.
292 FLAG_flush_code = false; 386 FLAG_flush_code = false;
293 FLAG_always_compact = false; 387 FLAG_always_compact = false;
294 FLAG_never_compact = true;
295 388
296 // Disable collection of maps if incremental marking is enabled. 389 // Disable collection of maps if incremental marking is enabled.
297 // TODO(gc) improve maps collection algorithm to work with incremental 390 // TODO(gc) improve maps collection algorithm to work with incremental
298 // marking. 391 // marking.
299 if (FLAG_incremental_marking) FLAG_collect_maps = false; 392 if (FLAG_incremental_marking) FLAG_collect_maps = false;
300 393
301 // Rather than passing the tracer around we stash it in a static member 394 // Rather than passing the tracer around we stash it in a static member
302 // variable. 395 // variable.
303 tracer_ = tracer; 396 tracer_ = tracer;
304 397
305 #ifdef DEBUG 398 #ifdef DEBUG
306 ASSERT(state_ == IDLE); 399 ASSERT(state_ == IDLE);
307 state_ = PREPARE_GC; 400 state_ = PREPARE_GC;
308 #endif 401 #endif
309 ASSERT(!FLAG_always_compact || !FLAG_never_compact); 402 ASSERT(!FLAG_always_compact || !FLAG_never_compact);
310 403
311 compacting_collection_ =
312 FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
313 compact_on_next_gc_ = false;
314
315 if (FLAG_never_compact) compacting_collection_ = false;
316 if (!heap()->map_space()->MapPointersEncodable()) {
317 compacting_collection_ = false;
318 }
319 if (FLAG_collect_maps) CreateBackPointers(); 404 if (FLAG_collect_maps) CreateBackPointers();
320 #ifdef ENABLE_GDB_JIT_INTERFACE 405 #ifdef ENABLE_GDB_JIT_INTERFACE
321 if (FLAG_gdbjit) { 406 if (FLAG_gdbjit) {
322 // If GDBJIT interface is active disable compaction. 407 // If GDBJIT interface is active disable compaction.
323 compacting_collection_ = false; 408 compacting_collection_ = false;
324 } 409 }
325 #endif 410 #endif
326 411
412 if (!FLAG_never_compact) {
413 slots_buffer_.Clear();
414 evacuation_candidates_.Rewind(0);
415
416 if (!heap()->incremental_marking()->IsMarking()) {
417 CollectEvacuationCandidates(heap()->old_pointer_space());
418 CollectEvacuationCandidates(heap()->old_data_space());
419 } else {
420 ClearEvacuationCandidates(heap()->old_pointer_space());
421 ClearEvacuationCandidates(heap()->old_data_space());
422 }
423 }
424
327 PagedSpaces spaces; 425 PagedSpaces spaces;
328 for (PagedSpace* space = spaces.next(); 426 for (PagedSpace* space = spaces.next();
329 space != NULL; space = spaces.next()) { 427 space != NULL;
330 space->PrepareForMarkCompact(compacting_collection_); 428 space = spaces.next()) {
429 space->PrepareForMarkCompact();
331 } 430 }
332 431
333 if (!heap()->incremental_marking()->IsMarking()) { 432 if (!heap()->incremental_marking()->IsMarking()) {
334 ClearMarkbits(); 433 ClearMarkbits(heap_);
335 #ifdef DEBUG 434 #ifdef DEBUG
336 VerifyMarkbitsAreClean(); 435 VerifyMarkbitsAreClean(heap_);
337 #endif 436 #endif
338 } 437 }
339 438
340 #ifdef DEBUG 439 #ifdef DEBUG
341 live_bytes_ = 0; 440 live_bytes_ = 0;
342 live_young_objects_size_ = 0; 441 live_young_objects_size_ = 0;
343 live_old_pointer_objects_size_ = 0; 442 live_old_pointer_objects_size_ = 0;
344 live_old_data_objects_size_ = 0; 443 live_old_data_objects_size_ = 0;
345 live_code_objects_size_ = 0; 444 live_code_objects_size_ = 0;
346 live_map_objects_size_ = 0; 445 live_map_objects_size_ = 0;
347 live_cell_objects_size_ = 0; 446 live_cell_objects_size_ = 0;
348 live_lo_objects_size_ = 0; 447 live_lo_objects_size_ = 0;
349 #endif 448 #endif
350 } 449 }
351 450
352 451
353 void MarkCompactCollector::Finish() { 452 void MarkCompactCollector::Finish() {
354 #ifdef DEBUG 453 #ifdef DEBUG
355 ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS); 454 ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
356 state_ = IDLE; 455 state_ = IDLE;
357 #endif 456 #endif
358 // The stub cache is not traversed during GC; clear the cache to 457 // The stub cache is not traversed during GC; clear the cache to
359 // force lazy re-initialization of it. This must be done after the 458 // force lazy re-initialization of it. This must be done after the
360 // GC, because it relies on the new address of certain old space 459 // GC, because it relies on the new address of certain old space
361 // objects (empty string, illegal builtin). 460 // objects (empty string, illegal builtin).
362 heap()->isolate()->stub_cache()->Clear(); 461 heap()->isolate()->stub_cache()->Clear();
363 462
364 heap()->external_string_table_.CleanUp(); 463 heap()->external_string_table_.CleanUp();
365
366 // If we've just compacted old space there's no reason to check the
367 // fragmentation limit. Just return.
368 if (HasCompacted()) return;
369
370 // We compact the old generation on the next GC if it has gotten too
371 // fragmented (ie, we could recover an expected amount of space by
372 // reclaiming the waste and free list blocks).
373 static const int kFragmentationLimit = 15; // Percent.
374 static const int kFragmentationAllowed = 1 * MB; // Absolute.
375 intptr_t old_gen_recoverable = 0;
376 intptr_t old_gen_used = 0;
377
378 OldSpaces spaces;
379 for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
380 old_gen_recoverable += space->Waste() + space->Available();
381 old_gen_used += space->Size();
382 }
383
384 int old_gen_fragmentation =
385 static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
386 if (old_gen_fragmentation > kFragmentationLimit &&
387 old_gen_recoverable > kFragmentationAllowed) {
388 compact_on_next_gc_ = true;
389 }
390 } 464 }
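For reference, the heuristic deleted here scheduled a compacting GC when the recoverable old-generation space exceeded both 15% of its size and 1 MB in absolute terms. A small self-contained restatement with a worked example (the byte counts in the comment are made up for illustration):

    #include <cstdint>

    // Restates the removed compact_on_next_gc_ heuristic. Example: with
    // old_gen_used = 20 MB and old_gen_recoverable = 4 MB,
    // fragmentation = 4 * 100 / 20 = 20% > 15% and 4 MB > 1 MB, so the old
    // code would have requested compaction on the next GC.
    static bool WouldCompactOnNextGC(intptr_t old_gen_recoverable,
                                     intptr_t old_gen_used) {
      static const int kFragmentationLimit = 15;                      // Percent.
      static const intptr_t kFragmentationAllowed = 1 * 1024 * 1024;  // 1 MB.
      int fragmentation =
          static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
      return fragmentation > kFragmentationLimit &&
             old_gen_recoverable > kFragmentationAllowed;
    }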
391 465
392 466
393 // ------------------------------------------------------------------------- 467 // -------------------------------------------------------------------------
394 // Phase 1: tracing and marking live objects. 468 // Phase 1: tracing and marking live objects.
395 // before: all objects are in normal state. 469 // before: all objects are in normal state.
396 // after: a live object's map pointer is marked as '00'. 470 // after: a live object's map pointer is marked as '00'.
397 471
398 // Marking all live objects in the heap as part of mark-sweep or mark-compact 472 // Marking all live objects in the heap as part of mark-sweep or mark-compact
399 // collection. Before marking, all objects are in their normal state. After 473 // collection. Before marking, all objects are in their normal state. After
(...skipping 150 matching lines...)
550 // (ConsString::cast(object)->second() == HEAP->empty_string()) 624 // (ConsString::cast(object)->second() == HEAP->empty_string())
551 // except the maps for the object and its possible substrings might be 625 // except the maps for the object and its possible substrings might be
552 // marked. 626 // marked.
553 HeapObject* object = HeapObject::cast(*p); 627 HeapObject* object = HeapObject::cast(*p);
554 Map* map = object->map(); 628 Map* map = object->map();
555 InstanceType type = map->instance_type(); 629 InstanceType type = map->instance_type();
556 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object; 630 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
557 631
558 Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second(); 632 Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
559 Heap* heap = map->GetHeap(); 633 Heap* heap = map->GetHeap();
560 if (second != heap->raw_unchecked_empty_string()) { 634 if (second != heap->empty_string()) {
561 return object; 635 return object;
562 } 636 }
563 637
564 // Since we don't have the object's start, it is impossible to update the 638 // Since we don't have the object's start, it is impossible to update the
565 // page dirty marks. Therefore, we only replace the string with its left 639 // page dirty marks. Therefore, we only replace the string with its left
566 // substring when page dirty marks do not change. 640 // substring when page dirty marks do not change.
567 // TODO(gc): Seems like we could relax this restriction with store buffers. 641 // TODO(gc): Seems like we could relax this restriction with store buffers.
568 Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first(); 642 Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
569 if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object; 643 if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
570 644
(...skipping 18 matching lines...)
589 &FixedBodyVisitor<StaticMarkingVisitor, 663 &FixedBodyVisitor<StaticMarkingVisitor,
590 ConsString::BodyDescriptor, 664 ConsString::BodyDescriptor,
591 void>::Visit); 665 void>::Visit);
592 666
593 667
594 table_.Register(kVisitFixedArray, 668 table_.Register(kVisitFixedArray,
595 &FlexibleBodyVisitor<StaticMarkingVisitor, 669 &FlexibleBodyVisitor<StaticMarkingVisitor,
596 FixedArray::BodyDescriptor, 670 FixedArray::BodyDescriptor,
597 void>::Visit); 671 void>::Visit);
598 672
599 table_.Register(kVisitGlobalContext, 673 table_.Register(kVisitGlobalContext, &VisitGlobalContext);
600 &FixedBodyVisitor<StaticMarkingVisitor,
601 Context::MarkCompactBodyDescriptor,
602 void>::Visit);
603 674
604 table_.Register(kVisitByteArray, &DataObjectVisitor::Visit); 675 table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
605 table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit); 676 table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
606 table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit); 677 table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
607 table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit); 678 table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
608 679
609 table_.Register(kVisitOddball, 680 table_.Register(kVisitOddball,
610 &FixedBodyVisitor<StaticMarkingVisitor, 681 &FixedBodyVisitor<StaticMarkingVisitor,
611 Oddball::BodyDescriptor, 682 Oddball::BodyDescriptor,
612 void>::Visit); 683 void>::Visit);
(...skipping 22 matching lines...)
635 table_.RegisterSpecializations<JSObjectVisitor, 706 table_.RegisterSpecializations<JSObjectVisitor,
636 kVisitJSObject, 707 kVisitJSObject,
637 kVisitJSObjectGeneric>(); 708 kVisitJSObjectGeneric>();
638 709
639 table_.RegisterSpecializations<StructObjectVisitor, 710 table_.RegisterSpecializations<StructObjectVisitor,
640 kVisitStruct, 711 kVisitStruct,
641 kVisitStructGeneric>(); 712 kVisitStructGeneric>();
642 } 713 }
643 714
644 INLINE(static void VisitPointer(Heap* heap, Object** p)) { 715 INLINE(static void VisitPointer(Heap* heap, Object** p)) {
645 MarkObjectByPointer(heap, p); 716 MarkObjectByPointer(heap, reinterpret_cast<Address>(p), p);
646 } 717 }
647 718
648 INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) { 719 INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
649 // Mark all objects pointed to in [start, end). 720 // Mark all objects pointed to in [start, end).
650 const int kMinRangeForMarkingRecursion = 64; 721 const int kMinRangeForMarkingRecursion = 64;
651 if (end - start >= kMinRangeForMarkingRecursion) { 722 if (end - start >= kMinRangeForMarkingRecursion) {
652 if (VisitUnmarkedObjects(heap, start, end)) return; 723 if (VisitUnmarkedObjects(heap, start, end)) return;
653 // We are close to a stack overflow, so just mark the objects. 724 // We are close to a stack overflow, so just mark the objects.
654 } 725 }
655 for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p); 726 for (Object** p = start; p < end; p++) {
727 MarkObjectByPointer(heap, reinterpret_cast<Address>(start), p);
Erik Corry 2011/06/20 20:41:26 Seems like everywhere you call this you are castin
Vyacheslav Egorov (Chromium) 2011/06/21 11:44:48 Done.
728 }
656 } 729 }
657 730
658 static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) { 731 static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
659 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); 732 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
660 Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address()); 733 Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
661 if (FLAG_cleanup_code_caches_at_gc && code->is_inline_cache_stub()) { 734 if (FLAG_cleanup_code_caches_at_gc && code->is_inline_cache_stub()) {
662 IC::Clear(rinfo->pc()); 735 IC::Clear(rinfo->pc());
663 // Please note targets for cleared inline caches do not have to be 736 // Please note targets for cleared inline caches do not have to be
664 // marked since they are contained in HEAP->non_monomorphic_cache(). 737 // marked since they are contained in HEAP->non_monomorphic_cache().
665 } else { 738 } else {
(...skipping 17 matching lines...)
683 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && 756 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
684 rinfo->IsPatchedReturnSequence()) || 757 rinfo->IsPatchedReturnSequence()) ||
685 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && 758 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
686 rinfo->IsPatchedDebugBreakSlotSequence())); 759 rinfo->IsPatchedDebugBreakSlotSequence()));
687 HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address()); 760 HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
688 MarkBit code_mark = Marking::MarkBitFrom(code); 761 MarkBit code_mark = Marking::MarkBitFrom(code);
689 heap->mark_compact_collector()->MarkObject(code, code_mark); 762 heap->mark_compact_collector()->MarkObject(code, code_mark);
690 } 763 }
691 764
692 // Mark object pointed to by p. 765 // Mark object pointed to by p.
693 INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) { 766 INLINE(static void MarkObjectByPointer(Heap* heap,
767 Address anchor,
768 Object** p)) {
694 if (!(*p)->IsHeapObject()) return; 769 if (!(*p)->IsHeapObject()) return;
695 HeapObject* object = ShortCircuitConsString(p); 770 HeapObject* object = ShortCircuitConsString(p);
771 heap->mark_compact_collector()->RecordSlot(anchor, p, object);
696 MarkBit mark = Marking::MarkBitFrom(object); 772 MarkBit mark = Marking::MarkBitFrom(object);
697 heap->mark_compact_collector()->MarkObject(object, mark); 773 heap->mark_compact_collector()->MarkObject(object, mark);
698 } 774 }
699 775
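MarkObjectByPointer now receives an anchor address for the slot it marks through and calls RecordSlot for every heap pointer it visits; the recorded slots are what later allow pointers into evacuated pages to be rewritten to the objects' new locations. A self-contained sketch of that record-then-update idea (the containers and names below are illustrative only and do not reflect V8's SlotsBuffer):

    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    typedef uintptr_t Address;

    // Illustrative only: during marking, remember each slot that holds a heap
    // pointer; after objects on candidate pages have been evacuated, walk the
    // recorded slots and replace old addresses with forwarding addresses.
    static std::vector<Address*> recorded_slots;

    static void RecordSlotSketch(Address* slot) {
      recorded_slots.push_back(slot);
    }

    static void UpdateRecordedSlots(
        const std::unordered_map<Address, Address>& forwarding) {
      for (Address* slot : recorded_slots) {
        std::unordered_map<Address, Address>::const_iterator it =
            forwarding.find(*slot);
        if (it != forwarding.end()) *slot = it->second;
      }
    }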
700 776
701 // Visit an unmarked object. 777 // Visit an unmarked object.
702 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector, 778 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
703 HeapObject* obj)) { 779 HeapObject* obj)) {
704 #ifdef DEBUG 780 #ifdef DEBUG
705 ASSERT(Isolate::Current()->heap()->Contains(obj)); 781 ASSERT(Isolate::Current()->heap()->Contains(obj));
(...skipping 16 matching lines...) Expand all
722 Object** end) { 798 Object** end) {
723 // Return false is we are close to the stack limit. 799 // Return false is we are close to the stack limit.
724 StackLimitCheck check(heap->isolate()); 800 StackLimitCheck check(heap->isolate());
725 if (check.HasOverflowed()) return false; 801 if (check.HasOverflowed()) return false;
726 802
727 MarkCompactCollector* collector = heap->mark_compact_collector(); 803 MarkCompactCollector* collector = heap->mark_compact_collector();
728 // Visit the unmarked objects. 804 // Visit the unmarked objects.
729 for (Object** p = start; p < end; p++) { 805 for (Object** p = start; p < end; p++) {
730 Object* o = *p; 806 Object* o = *p;
731 if (!o->IsHeapObject()) continue; 807 if (!o->IsHeapObject()) continue;
808 heap->mark_compact_collector()->RecordSlot(
809 reinterpret_cast<Address>(start),
810 p,
811 o);
732 HeapObject* obj = HeapObject::cast(o); 812 HeapObject* obj = HeapObject::cast(o);
733 MarkBit mark = Marking::MarkBitFrom(obj); 813 MarkBit mark = Marking::MarkBitFrom(obj);
734 if (mark.Get()) continue; 814 if (mark.Get()) continue;
735 VisitUnmarkedObject(collector, obj); 815 VisitUnmarkedObject(collector, obj);
736 } 816 }
737 return true; 817 return true;
738 } 818 }
739 819
740 static inline void VisitExternalReference(Address* p) { } 820 static inline void VisitExternalReference(Address* p) { }
741 static inline void VisitRuntimeEntry(RelocInfo* rinfo) { } 821 static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
(...skipping 22 matching lines...)
764 map->GetHeap()); 844 map->GetHeap());
765 } 845 }
766 846
767 // Code flushing support. 847 // Code flushing support.
768 848
769 // How many collections newly compiled code object will survive before being 849 // How many collections newly compiled code object will survive before being
770 // flushed. 850 // flushed.
771 static const int kCodeAgeThreshold = 5; 851 static const int kCodeAgeThreshold = 5;
772 852
773 inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) { 853 inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
774 Object* undefined = heap->raw_unchecked_undefined_value(); 854 Object* undefined = heap->undefined_value();
775 return (info->script() != undefined) && 855 return (info->script() != undefined) &&
776 (reinterpret_cast<Script*>(info->script())->source() != undefined); 856 (reinterpret_cast<Script*>(info->script())->source() != undefined);
777 } 857 }
778 858
779 859
780 inline static bool IsCompiled(JSFunction* function) { 860 inline static bool IsCompiled(JSFunction* function) {
781 return function->unchecked_code() != 861 return function->unchecked_code() !=
782 function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile); 862 function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
783 } 863 }
784 864
(...skipping 33 matching lines...)
818 } 898 }
819 899
820 // The function must be compiled and have the source code available, 900 // The function must be compiled and have the source code available,
821 // to be able to recompile it in case we need the function again. 901 // to be able to recompile it in case we need the function again.
822 if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) { 902 if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
823 return false; 903 return false;
824 } 904 }
825 905
826 // We never flush code for Api functions. 906 // We never flush code for Api functions.
827 Object* function_data = shared_info->function_data(); 907 Object* function_data = shared_info->function_data();
828 if (function_data->IsHeapObject() && 908 if (function_data->IsFunctionTemplateInfo()) return false;
829 (SafeMap(function_data)->instance_type() ==
830 FUNCTION_TEMPLATE_INFO_TYPE)) {
831 return false;
832 }
833 909
834 // Only flush code for functions. 910 // Only flush code for functions.
835 if (shared_info->code()->kind() != Code::FUNCTION) return false; 911 if (shared_info->code()->kind() != Code::FUNCTION) return false;
836 912
837 // Function must be lazy compilable. 913 // Function must be lazy compilable.
838 if (!shared_info->allows_lazy_compilation()) return false; 914 if (!shared_info->allows_lazy_compilation()) return false;
839 915
840 // If this is a full script wrapped in a function we do not flush the code. 916 // If this is a full script wrapped in a function we do not flush the code.
841 if (shared_info->is_toplevel()) return false; 917 if (shared_info->is_toplevel()) return false;
842 918
(...skipping 12 matching lines...)
855 931
856 // This function's code looks flushable. But we have to postpone the 932 // This function's code looks flushable. But we have to postpone the
857 // decision until we see all functions that point to the same 933 // decision until we see all functions that point to the same
858 // SharedFunctionInfo because some of them might be optimized. 934 // SharedFunctionInfo because some of them might be optimized.
859 // That would make the nonoptimized version of the code nonflushable, 935 // That would make the nonoptimized version of the code nonflushable,
860 // because it is required for bailing out from optimized code. 936 // because it is required for bailing out from optimized code.
861 heap->mark_compact_collector()->code_flusher()->AddCandidate(function); 937 heap->mark_compact_collector()->code_flusher()->AddCandidate(function);
862 return true; 938 return true;
863 } 939 }
864 940
865 941 static inline bool IsValidNotBuiltinContext(Object* ctx) {
866 static inline Map* SafeMap(Object* obj) { 942 return ctx->IsContext() &&
867 return HeapObject::cast(obj)->map(); 943 !Context::cast(ctx)->global()->IsJSBuiltinsObject();
868 } 944 }
869 945
870 946
871 static inline bool IsJSBuiltinsObject(Object* obj) {
872 return obj->IsHeapObject() &&
873 (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
874 }
875
876
877 static inline bool IsValidNotBuiltinContext(Object* ctx) {
878 if (!ctx->IsHeapObject()) return false;
879
880 Map* map = SafeMap(ctx);
881 Heap* heap = HeapObject::cast(ctx)->GetHeap();
882 if (!(map == heap->raw_unchecked_context_map() ||
883 map == heap->raw_unchecked_catch_context_map() ||
884 map == heap->raw_unchecked_global_context_map())) {
885 return false;
886 }
887
888 Context* context = reinterpret_cast<Context*>(ctx);
889
890 if (IsJSBuiltinsObject(context->global())) {
891 return false;
892 }
893
894 return true;
895 }
896
897
898 static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) { 947 static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
899 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object); 948 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
900 949
901 if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap(); 950 if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
902 951
903 FixedBodyVisitor<StaticMarkingVisitor, 952 FixedBodyVisitor<StaticMarkingVisitor,
904 SharedFunctionInfo::BodyDescriptor, 953 SharedFunctionInfo::BodyDescriptor,
905 void>::Visit(map, object); 954 void>::Visit(map, object);
906 } 955 }
907 956
(...skipping 30 matching lines...)
938 static void VisitCodeEntry(Heap* heap, Address entry_address) { 987 static void VisitCodeEntry(Heap* heap, Address entry_address) {
939 Object* code = Code::GetObjectFromEntryAddress(entry_address); 988 Object* code = Code::GetObjectFromEntryAddress(entry_address);
940 Object* old_code = code; 989 Object* old_code = code;
941 VisitPointer(heap, &code); 990 VisitPointer(heap, &code);
942 if (code != old_code) { 991 if (code != old_code) {
943 Memory::Address_at(entry_address) = 992 Memory::Address_at(entry_address) =
944 reinterpret_cast<Code*>(code)->entry(); 993 reinterpret_cast<Code*>(code)->entry();
945 } 994 }
946 } 995 }
947 996
997 static void VisitGlobalContext(Map* map, HeapObject* object) {
998 FixedBodyVisitor<StaticMarkingVisitor,
999 Context::MarkCompactBodyDescriptor,
1000 void>::Visit(map, object);
1001
1002 for (int idx = Context::FIRST_WEAK_SLOT;
1003 idx < Context::GLOBAL_CONTEXT_SLOTS;
1004 ++idx) {
1005 Object** slot =
1006 HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
1007 map->GetHeap()->mark_compact_collector()->RecordSlot(
1008 object->address(), slot, *slot);
1009 }
1010 }
948 1011
949 static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) { 1012 static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
950 Heap* heap = map->GetHeap(); 1013 Heap* heap = map->GetHeap();
951 MarkCompactCollector* collector = heap->mark_compact_collector(); 1014 MarkCompactCollector* collector = heap->mark_compact_collector();
952 if (!collector->is_code_flushing_enabled()) { 1015 if (!collector->is_code_flushing_enabled()) {
953 VisitJSFunction(map, object); 1016 VisitJSFunction(map, object);
954 return; 1017 return;
955 } 1018 }
956 1019
957 JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object); 1020 JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
958 // The function must have a valid context and not be a builtin. 1021 // The function must have a valid context and not be a builtin.
959 bool flush_code_candidate = false; 1022 bool flush_code_candidate = false;
960 if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) { 1023 if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
961 flush_code_candidate = FlushCodeForFunction(heap, jsfunction); 1024 flush_code_candidate = FlushCodeForFunction(heap, jsfunction);
962 } 1025 }
963 1026
964 if (!flush_code_candidate) { 1027 if (!flush_code_candidate) {
965 Code* code = jsfunction->unchecked_shared()->unchecked_code(); 1028 Code* code = jsfunction->unchecked_shared()->unchecked_code();
966 MarkBit code_mark = Marking::MarkBitFrom(code); 1029 MarkBit code_mark = Marking::MarkBitFrom(code);
967 HEAP->mark_compact_collector()->MarkObject(code, code_mark); 1030 heap->mark_compact_collector()->MarkObject(code, code_mark);
968 1031
969 if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) { 1032 if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
970 // For optimized functions we should retain both non-optimized version 1033 // For optimized functions we should retain both non-optimized version
971 // of its code and non-optimized version of all inlined functions. 1034 // of its code and non-optimized version of all inlined functions.
972 // This is required to support bailing out from inlined code. 1035 // This is required to support bailing out from inlined code.
973 DeoptimizationInputData* data = 1036 DeoptimizationInputData* data =
974 reinterpret_cast<DeoptimizationInputData*>( 1037 reinterpret_cast<DeoptimizationInputData*>(
975 jsfunction->unchecked_code()->unchecked_deoptimization_data()); 1038 jsfunction->unchecked_code()->unchecked_deoptimization_data());
976 1039
977 FixedArray* literals = data->UncheckedLiteralArray(); 1040 FixedArray* literals = data->UncheckedLiteralArray();
978 1041
979 for (int i = 0, count = data->InlinedFunctionCount()->value(); 1042 for (int i = 0, count = data->InlinedFunctionCount()->value();
980 i < count; 1043 i < count;
981 i++) { 1044 i++) {
982 JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i)); 1045 JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
983 Code* inlined_code = inlined->unchecked_shared()->unchecked_code(); 1046 Code* inlined_code = inlined->unchecked_shared()->unchecked_code();
984 MarkBit inlined_code_mark = 1047 MarkBit inlined_code_mark =
985 Marking::MarkBitFrom(inlined_code); 1048 Marking::MarkBitFrom(inlined_code);
986 HEAP->mark_compact_collector()->MarkObject( 1049 heap->mark_compact_collector()->MarkObject(
987 inlined_code, inlined_code_mark); 1050 inlined_code, inlined_code_mark);
988 } 1051 }
989 } 1052 }
990 } 1053 }
991 1054
992 VisitJSFunctionFields(map, 1055 VisitJSFunctionFields(map,
993 reinterpret_cast<JSFunction*>(object), 1056 reinterpret_cast<JSFunction*>(object),
994 flush_code_candidate); 1057 flush_code_candidate);
995 } 1058 }
996 1059
(...skipping 24 matching lines...)
1021 // Don't visit code object. 1084 // Don't visit code object.
1022 1085
1023 // Visit shared function info to avoid double checking of its 1086 // Visit shared function info to avoid double checking of its
1024 // flushability. 1087 // flushability.
1025 SharedFunctionInfo* shared_info = object->unchecked_shared(); 1088 SharedFunctionInfo* shared_info = object->unchecked_shared();
1026 MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info); 1089 MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info);
1027 if (!shared_info_mark.Get()) { 1090 if (!shared_info_mark.Get()) {
1028 Map* shared_info_map = shared_info->map(); 1091 Map* shared_info_map = shared_info->map();
1029 MarkBit shared_info_map_mark = 1092 MarkBit shared_info_map_mark =
1030 Marking::MarkBitFrom(shared_info_map); 1093 Marking::MarkBitFrom(shared_info_map);
1031 HEAP->mark_compact_collector()->SetMark(shared_info, shared_info_mark); 1094 heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark);
1032 HEAP->mark_compact_collector()->MarkObject(shared_info_map, 1095 heap->mark_compact_collector()->MarkObject(shared_info_map,
1033 shared_info_map_mark); 1096 shared_info_map_mark);
1034 VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map, 1097 VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
1035 shared_info, 1098 shared_info,
1036 true); 1099 true);
1037 } 1100 }
1038 } 1101 }
1039 1102
1040 VisitPointers(heap, 1103 VisitPointers(heap,
1041 SLOT_ADDR(object, 1104 SLOT_ADDR(object,
1042 JSFunction::kCodeEntryOffset + kPointerSize), 1105 JSFunction::kCodeEntryOffset + kPointerSize),
1043 SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset)); 1106 SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset));
1044 1107
1045 // Don't visit the next function list field as it is a weak reference. 1108 // Don't visit the next function list field as it is a weak reference.
1109 Object** next_function = SLOT_ADDR(object,
1110 JSFunction::kNextFunctionLinkOffset);
1111 heap->mark_compact_collector()->RecordSlot(
1112 reinterpret_cast<Address>(object), next_function, *next_function);
1046 } 1113 }
1047 1114
1048 1115
1049 static void VisitSharedFunctionInfoFields(Heap* heap, 1116 static void VisitSharedFunctionInfoFields(Heap* heap,
1050 HeapObject* object, 1117 HeapObject* object,
1051 bool flush_code_candidate) { 1118 bool flush_code_candidate) {
1052 VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset)); 1119 VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));
1053 1120
1054 if (!flush_code_candidate) { 1121 if (!flush_code_candidate) {
1055 VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset)); 1122 VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
(...skipping 47 matching lines...)
1103 1170
1104 class CodeMarkingVisitor : public ThreadVisitor { 1171 class CodeMarkingVisitor : public ThreadVisitor {
1105 public: 1172 public:
1106 explicit CodeMarkingVisitor(MarkCompactCollector* collector) 1173 explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1107 : collector_(collector) {} 1174 : collector_(collector) {}
1108 1175
1109 void VisitThread(Isolate* isolate, ThreadLocalTop* top) { 1176 void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1110 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) { 1177 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1111 Code* code = it.frame()->unchecked_code(); 1178 Code* code = it.frame()->unchecked_code();
1112 MarkBit code_bit = Marking::MarkBitFrom(code); 1179 MarkBit code_bit = Marking::MarkBitFrom(code);
1113 HEAP->mark_compact_collector()->MarkObject( 1180 collector_->MarkObject(it.frame()->unchecked_code(), code_bit);
1114 it.frame()->unchecked_code(), code_bit);
1115 } 1181 }
1116 } 1182 }
1117 1183
1118 private: 1184 private:
1119 MarkCompactCollector* collector_; 1185 MarkCompactCollector* collector_;
1120 }; 1186 };
1121 1187
1122 1188
1123 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor { 1189 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1124 public: 1190 public:
(...skipping 32 matching lines...)
1157 if (heap()->isolate()->debug()->IsLoaded() || 1223 if (heap()->isolate()->debug()->IsLoaded() ||
1158 heap()->isolate()->debug()->has_break_points()) { 1224 heap()->isolate()->debug()->has_break_points()) {
1159 EnableCodeFlushing(false); 1225 EnableCodeFlushing(false);
1160 return; 1226 return;
1161 } 1227 }
1162 #endif 1228 #endif
1163 EnableCodeFlushing(true); 1229 EnableCodeFlushing(true);
1164 1230
1165 // Ensure that empty descriptor array is marked. Method MarkDescriptorArray 1231 // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
1166 // relies on it being marked before any other descriptor array. 1232 // relies on it being marked before any other descriptor array.
1167 HeapObject* descriptor_array = heap()->raw_unchecked_empty_descriptor_array(); 1233 HeapObject* descriptor_array = heap()->empty_descriptor_array();
1168 MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array); 1234 MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1169 MarkObject(descriptor_array, descriptor_array_mark); 1235 MarkObject(descriptor_array, descriptor_array_mark);
1170 1236
1171 // Make sure we are not referencing the code from the stack. 1237 // Make sure we are not referencing the code from the stack.
1172 ASSERT(this == heap()->mark_compact_collector()); 1238 ASSERT(this == heap()->mark_compact_collector());
1173 for (StackFrameIterator it; !it.done(); it.Advance()) { 1239 for (StackFrameIterator it; !it.done(); it.Advance()) {
1174 Code* code = it.frame()->unchecked_code(); 1240 Code* code = it.frame()->unchecked_code();
1175 MarkBit code_mark = Marking::MarkBitFrom(code); 1241 MarkBit code_mark = Marking::MarkBitFrom(code);
1176 MarkObject(code, code_mark); 1242 MarkObject(code, code_mark);
1177 } 1243 }
(...skipping 67 matching lines...)
1245 !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) { 1311 !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
1246 // Check if the symbol being pruned is an external symbol. We need to 1312 // Check if the symbol being pruned is an external symbol. We need to
1247 // delete the associated external data as this symbol is going away. 1313 // delete the associated external data as this symbol is going away.
1248 1314
1249 // Since no objects have yet been moved we can safely access the map of 1315 // Since no objects have yet been moved we can safely access the map of
1250 // the object. 1316 // the object.
1251 if (o->IsExternalString()) { 1317 if (o->IsExternalString()) {
1252 heap_->FinalizeExternalString(String::cast(*p)); 1318 heap_->FinalizeExternalString(String::cast(*p));
1253 } 1319 }
1254 // Set the entry to null_value (as deleted). 1320 // Set the entry to null_value (as deleted).
1255 *p = heap_->raw_unchecked_null_value(); 1321 *p = heap_->null_value();
1256 pointers_removed_++; 1322 pointers_removed_++;
1257 } 1323 }
1258 } 1324 }
1259 } 1325 }
1260 1326
1261 int PointersRemoved() { 1327 int PointersRemoved() {
1262 return pointers_removed_; 1328 return pointers_removed_;
1263 } 1329 }
1264 private: 1330 private:
1265 Heap* heap_; 1331 Heap* heap_;
1266 int pointers_removed_; 1332 int pointers_removed_;
1267 }; 1333 };
1268 1334
1269 1335
1270 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects 1336 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1271 // are retained. 1337 // are retained.
1272 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { 1338 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1273 public: 1339 public:
1274 virtual Object* RetainAs(Object* object) { 1340 virtual Object* RetainAs(Object* object) {
1275 if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) { 1341 if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
1276 return object; 1342 return object;
1277 } else { 1343 } else {
1278 return NULL; 1344 return NULL;
1279 } 1345 }
1280 } 1346 }
1281 }; 1347 };
1282 1348
1283 1349
1350 /*
Erik Corry 2011/06/20 20:41:26 commented code
Vyacheslav Egorov (Chromium) 2011/06/21 11:44:48 Done.
1351 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
1352 public:
1353 virtual Object* RetainAs(Object* object) {
1354 const Object* old_object = object;
1355 MapWord map_word = HeapObject::cast(object)->map_word();
1356 if (map_word.IsForwardingAddress()) {
1357 object = map_word.ToForwardingAddress();
1358 }
1359 PrintF("%p -> %p\n", (void*) old_object, (void*) object);
1360 object->Print();
1361 return object;
1362 }
1363 };
1364 */
1365
1284 void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) { 1366 void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
1285 ASSERT(IsMarked(object)); 1367 ASSERT(IsMarked(object));
1286 ASSERT(HEAP->Contains(object)); 1368 ASSERT(HEAP->Contains(object));
1287 if (object->IsMap()) { 1369 if (object->IsMap()) {
1288 Map* map = Map::cast(object); 1370 Map* map = Map::cast(object);
1289 if (FLAG_cleanup_code_caches_at_gc) { 1371 if (FLAG_cleanup_code_caches_at_gc) {
1290 map->ClearCodeCache(heap()); 1372 map->ClearCodeCache(heap());
1291 } 1373 }
1292 1374
1293 // When map collection is enabled we have to mark through map's transitions 1375 // When map collection is enabled we have to mark through map's transitions
(...skipping 10 matching lines...)
1304 } else { 1386 } else {
1305 marking_deque_.PushBlack(object); 1387 marking_deque_.PushBlack(object);
1306 } 1388 }
1307 } 1389 }
1308 1390
1309 1391
1310 void MarkCompactCollector::MarkMapContents(Map* map) { 1392 void MarkCompactCollector::MarkMapContents(Map* map) {
1311 // Mark prototype transitions array but don't push it into marking stack. 1393 // Mark prototype transitions array but don't push it into marking stack.
1312 // This will make references from it weak. We will clean dead prototype 1394 // This will make references from it weak. We will clean dead prototype
1313 // transitions in ClearNonLiveTransitions. 1395 // transitions in ClearNonLiveTransitions.
1314 FixedArray* prototype_transitions = map->unchecked_prototype_transitions(); 1396 FixedArray* prototype_transitions = map->prototype_transitions();
1315 MarkBit mark = Marking::MarkBitFrom(prototype_transitions); 1397 MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
1316 if (!mark.Get()) mark.Set(); 1398 if (!mark.Get()) mark.Set();
1317 1399
1318 Object* raw_descriptor_array = 1400 Object** raw_descriptor_array_slot =
1319 *HeapObject::RawField(map, 1401 HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
1320 Map::kInstanceDescriptorsOrBitField3Offset); 1402 Object* raw_descriptor_array = *raw_descriptor_array_slot;
1321 if (!raw_descriptor_array->IsSmi()) { 1403 if (!raw_descriptor_array->IsSmi()) {
1322 MarkDescriptorArray( 1404 MarkDescriptorArray(
1323 reinterpret_cast<DescriptorArray*>(raw_descriptor_array)); 1405 reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
1324 } 1406 }
1325 1407
1326 // Mark the Object* fields of the Map. 1408 // Mark the Object* fields of the Map.
1327 // Since the descriptor array has been marked already, it is fine 1409 // Since the descriptor array has been marked already, it is fine
1328 // that one of these fields contains a pointer to it. 1410 // that one of these fields contains a pointer to it.
1329 Object** start_slot = HeapObject::RawField(map, 1411 Object** start_slot = HeapObject::RawField(map,
1330 Map::kPointerFieldsBeginOffset); 1412 Map::kPointerFieldsBeginOffset);
1331 1413
1332 Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset); 1414 Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
1333 1415
1334 StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot); 1416 StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot);
1335 } 1417 }
1336 1418
1337 1419
1338 void MarkCompactCollector::MarkDescriptorArray( 1420 void MarkCompactCollector::MarkDescriptorArray(
1339 DescriptorArray* descriptors) { 1421 DescriptorArray* descriptors) {
1340 MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors); 1422 MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
1341 if (descriptors_mark.Get()) return; 1423 if (descriptors_mark.Get()) return;
1342 // Empty descriptor array is marked as a root before any maps are marked. 1424 // Empty descriptor array is marked as a root before any maps are marked.
1343 ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array()); 1425 ASSERT(descriptors != heap()->empty_descriptor_array());
1344 SetMark(descriptors, descriptors_mark); 1426 SetMark(descriptors, descriptors_mark);
1345 1427
1346 FixedArray* contents = reinterpret_cast<FixedArray*>( 1428 FixedArray* contents = reinterpret_cast<FixedArray*>(
1347 descriptors->get(DescriptorArray::kContentArrayIndex)); 1429 descriptors->get(DescriptorArray::kContentArrayIndex));
1348 ASSERT(contents->IsHeapObject()); 1430 ASSERT(contents->IsHeapObject());
1349 ASSERT(!IsMarked(contents)); 1431 ASSERT(!IsMarked(contents));
1350 ASSERT(contents->IsFixedArray()); 1432 ASSERT(contents->IsFixedArray());
1351 ASSERT(contents->length() >= 2); 1433 ASSERT(contents->length() >= 2);
1352 MarkBit contents_mark = Marking::MarkBitFrom(contents); 1434 MarkBit contents_mark = Marking::MarkBitFrom(contents);
1353 SetMark(contents, contents_mark); 1435 SetMark(contents, contents_mark);
1354 // Contents contains (value, details) pairs. If the details say that the type 1436 // Contents contains (value, details) pairs. If the details say that the type
1355 // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, 1437 // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
1356 // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as 1438 // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
1357 // live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and 1439 // live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
1358 // CONSTANT_TRANSITION is the value an Object* (a Map*). 1440 // CONSTANT_TRANSITION is the value an Object* (a Map*).
1359 for (int i = 0; i < contents->length(); i += 2) { 1441 for (int i = 0; i < contents->length(); i += 2) {
1360 // If the pair (value, details) at index i, i+1 is not 1442 // If the pair (value, details) at index i, i+1 is not
1361 // a transition or null descriptor, mark the value. 1443 // a transition or null descriptor, mark the value.
1362 PropertyDetails details(Smi::cast(contents->get(i + 1))); 1444 PropertyDetails details(Smi::cast(contents->get(i + 1)));
1445
1446 Object** slot = contents->data_start() + i;
1447 Object* value = *slot;
1448 if (!value->IsHeapObject()) continue;
1449
1450 RecordSlot(reinterpret_cast<Address>(contents), slot, *slot);
1451
1363 if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) { 1452 if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
1364 HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i)); 1453 HeapObject* object = HeapObject::cast(value);
1365 if (object->IsHeapObject()) { 1454 MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object));
1366 MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object)); 1455 if (!mark.Get()) {
1367 if (!mark.Get()) { 1456 SetMark(HeapObject::cast(object), mark);
1368 SetMark(HeapObject::cast(object), mark); 1457 marking_deque_.PushBlack(object);
1369 marking_deque_.PushBlack(object);
1370 }
1371 } 1458 }
1372 } 1459 }
1373 } 1460 }
1374 // The DescriptorArray descriptors contains a pointer to its contents array, 1461 // The DescriptorArray descriptors contains a pointer to its contents array,
1375 // but the contents array is already marked. 1462 // but the contents array is already marked.
1376 marking_deque_.PushBlack(descriptors); 1463 marking_deque_.PushBlack(descriptors);
1377 } 1464 }
1378 1465
1379 1466
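To make the (value, details) pair layout described above concrete: the contents array stores descriptor entries flat, so entry k occupies indices 2k (value) and 2k + 1 (details), and only entries whose details type falls below the phantom threshold are treated as strong references. A minimal standalone sketch, with stand-in types and an assumed threshold constant rather than the real V8 internals:

#include <cstddef>
#include <vector>

static const int kFirstPhantomType = 5;  // assumed stand-in for FIRST_PHANTOM_PROPERTY_TYPE

// contents holds flat (value, details_type) pairs; entry k occupies indices
// 2k and 2k + 1, matching the i += 2 loop in MarkDescriptorArray above.
std::vector<int> StrongDescriptorValues(const std::vector<int>& contents) {
  std::vector<int> strong;
  for (size_t i = 0; i + 1 < contents.size(); i += 2) {
    int value = contents[i];
    int details_type = contents[i + 1];
    if (details_type < kFirstPhantomType) {
      strong.push_back(value);  // the collector would push this on the marking deque
    }
  }
  return strong;
}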
1380 void MarkCompactCollector::CreateBackPointers() { 1467 void MarkCompactCollector::CreateBackPointers() {
(...skipping 112 matching lines...)
1493 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) { 1580 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1494 Object* o = *p; 1581 Object* o = *p;
1495 if (!o->IsHeapObject()) return false; 1582 if (!o->IsHeapObject()) return false;
1496 HeapObject* heap_object = HeapObject::cast(o); 1583 HeapObject* heap_object = HeapObject::cast(o);
1497 MarkBit mark = Marking::MarkBitFrom(heap_object); 1584 MarkBit mark = Marking::MarkBitFrom(heap_object);
1498 return !mark.Get(); 1585 return !mark.Get();
1499 } 1586 }
1500 1587
1501 1588
1502 void MarkCompactCollector::MarkSymbolTable() { 1589 void MarkCompactCollector::MarkSymbolTable() {
1503 SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table(); 1590 SymbolTable* symbol_table = heap()->symbol_table();
1504 // Mark the symbol table itself. 1591 // Mark the symbol table itself.
1505 MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table); 1592 MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table);
1506 SetMark(symbol_table, symbol_table_mark); 1593 SetMark(symbol_table, symbol_table_mark);
1507 // Explicitly mark the prefix. 1594 // Explicitly mark the prefix.
1508 MarkingVisitor marker(heap()); 1595 MarkingVisitor marker(heap());
1509 symbol_table->IteratePrefix(&marker); 1596 symbol_table->IteratePrefix(&marker);
1510 ProcessMarkingDeque(); 1597 ProcessMarkingDeque();
1511 } 1598 }
1512 1599
1513 1600
(...skipping 260 matching lines...)
1774 ProcessExternalMarking(); 1861 ProcessExternalMarking();
1775 1862
1776 AfterMarking(); 1863 AfterMarking();
1777 } 1864 }
1778 1865
1779 1866
1780 void MarkCompactCollector::AfterMarking() { 1867 void MarkCompactCollector::AfterMarking() {
1781 // Prune the symbol table removing all symbols only pointed to by the 1868 // Prune the symbol table removing all symbols only pointed to by the
1782 // symbol table. Cannot use symbol_table() here because the symbol 1869 // symbol table. Cannot use symbol_table() here because the symbol
1783 // table is marked. 1870 // table is marked.
1784 SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table(); 1871 SymbolTable* symbol_table = heap()->symbol_table();
1785 SymbolTableCleaner v(heap()); 1872 SymbolTableCleaner v(heap());
1786 symbol_table->IterateElements(&v); 1873 symbol_table->IterateElements(&v);
1787 symbol_table->ElementsRemoved(v.PointersRemoved()); 1874 symbol_table->ElementsRemoved(v.PointersRemoved());
1788 heap()->external_string_table_.Iterate(&v); 1875 heap()->external_string_table_.Iterate(&v);
1789 heap()->external_string_table_.CleanUp(); 1876 heap()->external_string_table_.CleanUp();
1790 1877
1791 // Process the weak references. 1878 // Process the weak references.
1792 MarkCompactWeakObjectRetainer mark_compact_object_retainer; 1879 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
1793 heap()->ProcessWeakReferences(&mark_compact_object_retainer); 1880 heap()->ProcessWeakReferences(&mark_compact_object_retainer);
1794 1881
(...skipping 30 matching lines...)
1825 live_code_objects_size_ += obj->Size(); 1912 live_code_objects_size_ += obj->Size();
1826 } else if (heap()->lo_space()->Contains(obj)) { 1913 } else if (heap()->lo_space()->Contains(obj)) {
1827 live_lo_objects_size_ += obj->Size(); 1914 live_lo_objects_size_ += obj->Size();
1828 } else { 1915 } else {
1829 UNREACHABLE(); 1916 UNREACHABLE();
1830 } 1917 }
1831 } 1918 }
1832 #endif // DEBUG 1919 #endif // DEBUG
1833 1920
1834 1921
1835 // Safe to use during marking phase only.
1836 bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
1837 return object->map()->instance_type() == MAP_TYPE;
1838 }
1839
1840
1841 void MarkCompactCollector::ClearNonLiveTransitions() { 1922 void MarkCompactCollector::ClearNonLiveTransitions() {
1842 HeapObjectIterator map_iterator(heap()->map_space()); 1923 HeapObjectIterator map_iterator(heap()->map_space());
1843 // Iterate over the map space, setting map transitions that go from 1924 // Iterate over the map space, setting map transitions that go from
1844 // a marked map to an unmarked map to null transitions. At the same time, 1925 // a marked map to an unmarked map to null transitions. At the same time,
1845 // set all the prototype fields of maps back to their original value, 1926 // set all the prototype fields of maps back to their original value,
1846 // dropping the back pointers temporarily stored in the prototype field. 1927 // dropping the back pointers temporarily stored in the prototype field.
1847 // Setting the prototype field requires following the linked list of 1928 // Setting the prototype field requires following the linked list of
1848 // back pointers, reversing them all at once. This allows us to find 1929 // back pointers, reversing them all at once. This allows us to find
1849 // those maps with map transitions that need to be nulled, and only 1930 // those maps with map transitions that need to be nulled, and only
1850 // scan the descriptor arrays of those maps, not all maps. 1931 // scan the descriptor arrays of those maps, not all maps.
1851 // All of these actions are carried out only on maps of JSObjects 1932 // All of these actions are carried out only on maps of JSObjects
1852 // and related subtypes. 1933 // and related subtypes.
1853 for (HeapObject* obj = map_iterator.Next(); 1934 for (HeapObject* obj = map_iterator.Next();
1854 obj != NULL; obj = map_iterator.Next()) { 1935 obj != NULL; obj = map_iterator.Next()) {
1855 Map* map = reinterpret_cast<Map*>(obj); 1936 Map* map = reinterpret_cast<Map*>(obj);
1856 MarkBit map_mark = Marking::MarkBitFrom(map); 1937 MarkBit map_mark = Marking::MarkBitFrom(map);
1857 if (map->IsFreeSpace()) continue; 1938 if (map->IsFreeSpace()) continue;
1858 1939
1859 ASSERT(SafeIsMap(map)); 1940 ASSERT(map->IsMap());
1860 // Only JSObject and subtypes have map transitions and back pointers. 1941 // Only JSObject and subtypes have map transitions and back pointers.
1861 if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue; 1942 if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
1862 if (map->instance_type() > JS_FUNCTION_TYPE) continue; 1943 if (map->instance_type() > JS_FUNCTION_TYPE) continue;
1863 1944
1864 if (map_mark.Get() && 1945 if (map_mark.Get() &&
1865 map->attached_to_shared_function_info()) { 1946 map->attached_to_shared_function_info()) {
1866 // This map is used for inobject slack tracking and has been detached 1947 // This map is used for inobject slack tracking and has been detached
1867 // from SharedFunctionInfo during the mark phase. 1948 // from SharedFunctionInfo during the mark phase.
1868 // Since it survived the GC, reattach it now. 1949 // Since it survived the GC, reattach it now.
1869 map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map); 1950 map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
1870 } 1951 }
1871 1952
1872 // Clear dead prototype transitions. 1953 // Clear dead prototype transitions.
1873 FixedArray* prototype_transitions = map->unchecked_prototype_transitions(); 1954 FixedArray* prototype_transitions = map->prototype_transitions();
1874 if (prototype_transitions->length() > 0) { 1955 if (prototype_transitions->length() > 0) {
1875 int finger = Smi::cast(prototype_transitions->get(0))->value(); 1956 int finger = Smi::cast(prototype_transitions->get(0))->value();
1876 int new_finger = 1; 1957 int new_finger = 1;
1877 for (int i = 1; i < finger; i += 2) { 1958 for (int i = 1; i < finger; i += 2) {
1878 HeapObject* prototype = HeapObject::cast(prototype_transitions->get(i)); 1959 HeapObject* prototype = HeapObject::cast(prototype_transitions->get(i));
1879 Map* cached_map = Map::cast(prototype_transitions->get(i + 1)); 1960 Map* cached_map = Map::cast(prototype_transitions->get(i + 1));
1880 MarkBit prototype_mark = Marking::MarkBitFrom(prototype); 1961 MarkBit prototype_mark = Marking::MarkBitFrom(prototype);
1881 MarkBit cached_map_mark = Marking::MarkBitFrom(cached_map); 1962 MarkBit cached_map_mark = Marking::MarkBitFrom(cached_map);
1882 if (prototype_mark.Get() && cached_map_mark.Get()) { 1963 if (prototype_mark.Get() && cached_map_mark.Get()) {
1883 if (new_finger != i) { 1964 if (new_finger != i) {
1884 prototype_transitions->set_unchecked(heap_, 1965 prototype_transitions->set_unchecked(heap_,
1885 new_finger, 1966 new_finger,
1886 prototype, 1967 prototype,
1887 UPDATE_WRITE_BARRIER); 1968 UPDATE_WRITE_BARRIER);
1888 prototype_transitions->set_unchecked(heap_, 1969 prototype_transitions->set_unchecked(heap_,
1889 new_finger + 1, 1970 new_finger + 1,
1890 cached_map, 1971 cached_map,
1891 SKIP_WRITE_BARRIER); 1972 SKIP_WRITE_BARRIER);
1892 } 1973 }
1974
1975 Object** prototype_slot =
1976 prototype_transitions->data_start() + new_finger;
1977 RecordSlot(reinterpret_cast<Address>(prototype_transitions),
1978 prototype_slot,
1979 prototype);
1893 new_finger += 2; 1980 new_finger += 2;
1894 } 1981 }
1895 } 1982 }
1896 1983
1897 // Fill slots that became free with undefined value. 1984 // Fill slots that became free with undefined value.
1898 Object* undefined = heap()->raw_unchecked_undefined_value(); 1985 Object* undefined = heap()->undefined_value();
1899 for (int i = new_finger; i < finger; i++) { 1986 for (int i = new_finger; i < finger; i++) {
1900 prototype_transitions->set_unchecked(heap_, 1987 prototype_transitions->set_unchecked(heap_,
1901 i, 1988 i,
1902 undefined, 1989 undefined,
1903 SKIP_WRITE_BARRIER); 1990 SKIP_WRITE_BARRIER);
1991
1992 // TODO(gc): we should not evacuate the first page of data space,
1993 // but we are doing it now to increase coverage.
1994 Object** undefined_slot =
1995 prototype_transitions->data_start() + i;
1996 RecordSlot(reinterpret_cast<Address>(prototype_transitions),
1997 undefined_slot,
1998 undefined);
1904 } 1999 }
1905 prototype_transitions->set_unchecked(0, Smi::FromInt(new_finger)); 2000 prototype_transitions->set_unchecked(0, Smi::FromInt(new_finger));
1906 } 2001 }
1907 2002
1908 // Follow the chain of back pointers to find the prototype. 2003 // Follow the chain of back pointers to find the prototype.
1909 Map* current = map; 2004 Map* current = map;
1910 while (SafeIsMap(current)) { 2005 while (current->IsMap()) {
1911 current = reinterpret_cast<Map*>(current->prototype()); 2006 current = reinterpret_cast<Map*>(current->prototype());
1912 ASSERT(current->IsHeapObject()); 2007 ASSERT(current->IsHeapObject());
1913 } 2008 }
1914 Object* real_prototype = current; 2009 Object* real_prototype = current;
1915 2010
1916 // Follow back pointers, setting them to prototype, 2011 // Follow back pointers, setting them to prototype,
1917 // clearing map transitions when necessary. 2012 // clearing map transitions when necessary.
1918 current = map; 2013 current = map;
1919 bool on_dead_path = !map_mark.Get(); 2014 bool on_dead_path = !map_mark.Get();
1920 Object* next; 2015 Object* next;
1921 while (SafeIsMap(current)) { 2016 while (current->IsMap()) {
1922 next = current->prototype(); 2017 next = current->prototype();
1923 // There should never be a dead map above a live map. 2018 // There should never be a dead map above a live map.
1924 MarkBit current_mark = Marking::MarkBitFrom(current); 2019 MarkBit current_mark = Marking::MarkBitFrom(current);
1925 ASSERT(on_dead_path || current_mark.Get()); 2020 bool is_alive = current_mark.Get();
2021 ASSERT(on_dead_path || is_alive);
1926 2022
1927 // A live map above a dead map indicates a dead transition. 2023 // A live map above a dead map indicates a dead transition.
1928 // This test will always be false on the first iteration. 2024 // This test will always be false on the first iteration.
1929 if (on_dead_path && current_mark.Get()) { 2025 if (on_dead_path && is_alive) {
1930 on_dead_path = false; 2026 on_dead_path = false;
1931 current->ClearNonLiveTransitions(heap(), real_prototype); 2027 current->ClearNonLiveTransitions(heap(), real_prototype);
1932 } 2028 }
1933 *HeapObject::RawField(current, Map::kPrototypeOffset) = 2029 *HeapObject::RawField(current, Map::kPrototypeOffset) =
1934 real_prototype; 2030 real_prototype;
2031
2032 if (is_alive) {
2033 RecordSlot(current->address(),
2034 HeapObject::RawField(current, Map::kPrototypeOffset),
2035 real_prototype);
2036 }
1935 current = reinterpret_cast<Map*>(next); 2037 current = reinterpret_cast<Map*>(next);
1936 } 2038 }
1937 } 2039 }
1938 } 2040 }
1939 2041
1940 2042
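The back-pointer trick used by ClearNonLiveTransitions above is easier to see in isolation: during marking each map's prototype field temporarily holds a back pointer, so the collector first walks the chain to its end to recover the real prototype, then walks it again restoring the field, clearing transitions the first time it steps from a dead map onto a live one. A compact standalone sketch with stand-in types (not V8 code):

struct MapNode {
  MapNode* prototype;  // during this phase: the back pointer, or the real prototype at the end
  bool is_map;         // false once the real prototype object is reached
  bool is_alive;       // the mark bit
};

MapNode* RestorePrototypeChain(MapNode* map) {
  // First walk: follow back pointers until a non-map is reached.
  MapNode* current = map;
  while (current->is_map) current = current->prototype;
  MapNode* real_prototype = current;

  // Second walk: restore prototype fields; the first live map found while
  // still on a dead path is the one whose transitions must be cleared.
  bool on_dead_path = !map->is_alive;
  current = map;
  while (current->is_map) {
    MapNode* next = current->prototype;
    if (on_dead_path && current->is_alive) {
      on_dead_path = false;
      // the real code calls current->ClearNonLiveTransitions(heap, real_prototype) here
    }
    current->prototype = real_prototype;
    current = next;
  }
  return real_prototype;
}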
1941 // We scavenge new space simultaneously with sweeping. This is done in two 2043
1942 // passes. 2044 // passes.
1943 // 2045 //
1944 // The first pass migrates all live objects from one semispace to another or 2046
1945 // promotes them to old space. The forwarding address is written directly into 2047
1946 // the first word of the object without any encoding. If the object is dead we 2048
1947 // write NULL as a forwarding address. 2049
1948 // 2050 //
1949 // The second pass updates pointers to new space in all spaces. It is possible 2051 // The second pass updates pointers to new space in all spaces. It is possible
1950 // to encounter pointers to dead new space objects during traversal of pointers 2052 // to encounter pointers to dead new space objects during traversal of pointers
1951 // to new space. We should clear them to avoid encountering them during the 2053
1952 // next pointer iteration. This is an issue if the store buffer overflows and we 2054
1953 // have to scan the entire old space, including dead objects, looking for 2055 // have to scan the entire old space, including dead objects, looking for
1954 // pointers to new space. 2056 // pointers to new space.
1955 static void MigrateObject(Heap* heap, 2057 void MarkCompactCollector::MigrateObject(Address dst,
1956 Address dst, 2058 Address src,
1957 Address src, 2059 int size,
1958 int size, 2060 AllocationSpace dest) {
1959 bool to_old_space) { 2061 ASSERT(dest == OLD_POINTER_SPACE ||
1960 if (to_old_space) { 2062 dest == OLD_DATA_SPACE ||
1961 heap->CopyBlockToOldSpaceAndUpdateWriteBarrier(dst, src, size); 2063 dest == LO_SPACE ||
2064 dest == NEW_SPACE);
2065
2066 if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
2067 Address src_slot = src;
2068 Address dst_slot = dst;
2069 ASSERT(IsAligned(size, kPointerSize));
2070
2071 for (int remaining = size / kPointerSize;
2072 remaining > 0;
2073 remaining--) {
Erik Corry 2011/06/20 20:41:26 Fits on one line.
Vyacheslav Egorov (Chromium) 2011/06/21 11:44:48 Done.
2074 Object* value = Memory::Object_at(src_slot);
2075
2076 Memory::Object_at(dst_slot) = value;
2077
2078 if (heap_->InNewSpace(value)) {
2079 heap_->store_buffer()->Mark(dst_slot);
2080 } else if (value->IsHeapObject() &&
2081 MarkCompactCollector::IsOnEvacuationCandidate(value)) {
2082 slots_buffer_.Add(reinterpret_cast<Object**>(dst_slot));
2083 }
2084
2085 src_slot += kPointerSize;
2086 dst_slot += kPointerSize;
2087 }
1962 } else { 2088 } else {
1963 heap->CopyBlock(dst, src, size); 2089 heap_->CopyBlock(dst, src, size);
1964 } 2090 }
1965 Memory::Address_at(src) = dst; 2091 Memory::Address_at(src) = dst;
1966 } 2092 }
1967 2093
1968 2094
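The pointer-space branch of MigrateObject above copies the object word by word so that each slot can be classified as it lands at its new address: slots holding new-space pointers are re-registered with the store buffer, and slots pointing into evacuation candidates are remembered in the slots buffer for a later fix-up pass. A self-contained sketch of that classification, with placeholder predicates standing in for the real Heap and MarkCompactCollector queries:

#include <cstddef>
#include <cstdint>
#include <vector>

// Placeholder predicates; real implementations would inspect the page that
// the value points into.
static bool InNewSpace(uintptr_t /* value */) { return false; }
static bool OnEvacuationCandidate(uintptr_t /* value */) { return false; }

// Copy `words` pointer-sized slots from src to dst and remember where each
// interesting slot ended up so the pointer it holds can be updated later.
void CopyAndRecordSlots(uintptr_t* dst, const uintptr_t* src, size_t words,
                        std::vector<uintptr_t*>* store_buffer_slots,
                        std::vector<uintptr_t*>* evacuation_slots) {
  for (size_t i = 0; i < words; i++) {
    dst[i] = src[i];
    if (InNewSpace(dst[i])) {
      store_buffer_slots->push_back(&dst[i]);   // heap_->store_buffer()->Mark(dst_slot)
    } else if (OnEvacuationCandidate(dst[i])) {
      evacuation_slots->push_back(&dst[i]);     // slots_buffer_.Add(...)
    }
  }
}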
1969 class StaticPointersToNewGenUpdatingVisitor : public
1970 StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
1971 public:
1972 static inline void VisitPointer(Heap* heap, Object** p) {
1973 if (!(*p)->IsHeapObject()) return;
1974
1975 HeapObject* obj = HeapObject::cast(*p);
1976 Address old_addr = obj->address();
1977
1978 if (heap->new_space()->Contains(obj)) {
1979 ASSERT(heap->InFromSpace(*p));
1980 *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
1981 ASSERT(!heap->InFromSpace(*p));
1982 }
1983 }
1984 };
1985
1986
1987 // Visitor for updating pointers from live objects in old spaces to new space. 2095 // Visitor for updating pointers from live objects in old spaces to new space.
1988 // It does not expect to encounter pointers to dead objects. 2096 // It does not expect to encounter pointers to dead objects.
1989 class PointersToNewGenUpdatingVisitor: public ObjectVisitor { 2097 class PointersUpdatingVisitor: public ObjectVisitor {
1990 public: 2098 public:
1991 explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { } 2099 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
1992 2100
1993 void VisitPointer(Object** p) { 2101 void VisitPointer(Object** p) {
1994 StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p); 2102 UpdatePointer(p);
1995 } 2103 }
1996 2104
1997 void VisitPointers(Object** start, Object** end) { 2105 void VisitPointers(Object** start, Object** end) {
1998 for (Object** p = start; p < end; p++) { 2106 for (Object** p = start; p < end; p++) UpdatePointer(p);
1999 StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
2000 }
2001 } 2107 }
2002 2108
2003 void VisitCodeTarget(RelocInfo* rinfo) { 2109 void VisitCodeTarget(RelocInfo* rinfo) {
2004 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); 2110 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2005 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); 2111 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2006 VisitPointer(&target); 2112 VisitPointer(&target);
2007 rinfo->set_target_address(Code::cast(target)->instruction_start(), NULL); 2113 rinfo->set_target_address(Code::cast(target)->instruction_start(), NULL);
2008 } 2114 }
2009 2115
2010 void VisitDebugTarget(RelocInfo* rinfo) { 2116 void VisitDebugTarget(RelocInfo* rinfo) {
2011 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && 2117 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2012 rinfo->IsPatchedReturnSequence()) || 2118 rinfo->IsPatchedReturnSequence()) ||
2013 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && 2119 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2014 rinfo->IsPatchedDebugBreakSlotSequence())); 2120 rinfo->IsPatchedDebugBreakSlotSequence()));
2015 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); 2121 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2016 VisitPointer(&target); 2122 VisitPointer(&target);
2017 rinfo->set_call_address(Code::cast(target)->instruction_start()); 2123 rinfo->set_call_address(Code::cast(target)->instruction_start());
2018 } 2124 }
2125
2019 private: 2126 private:
2127 inline void UpdatePointer(Object** p) {
2128 if (!(*p)->IsHeapObject()) return;
2129
2130 HeapObject* obj = HeapObject::cast(*p);
2131
2132 if (heap_->InNewSpace(obj) ||
2133 MarkCompactCollector::IsOnEvacuationCandidate(obj)) {
2134 ASSERT(obj->map_word().IsForwardingAddress());
2135 *p = obj->map_word().ToForwardingAddress();
2136 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(*p));
2137 }
2138 }
2139
2020 Heap* heap_; 2140 Heap* heap_;
2021 }; 2141 };
2022 2142
2023 2143
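PointersUpdatingVisitor relies on moved objects leaving a forwarding address behind: objects evacuated from old-space candidates encode it in their map word, while new-space survivors get their destination written into their first word (or NULL if dead). A simplified standalone illustration of the forward-then-follow pattern; the tag bit below is purely illustrative and is not V8's actual MapWord encoding:

#include <cstdint>

// One-word object header: normally the map pointer; after the object has
// been moved the collector overwrites it with the new address plus a tag.
struct Header { uintptr_t word; };

static const uintptr_t kForwardedTag = 1;  // illustrative encoding only

void SetForwardingAddress(Header* old_object, uintptr_t new_address) {
  old_object->word = new_address | kForwardedTag;
}

// Pointer-updating pass: follow the forwarding address if the target moved,
// otherwise leave the pointer untouched.
uintptr_t UpdateSlot(uintptr_t target) {
  const Header* header = reinterpret_cast<const Header*>(target);
  if ((header->word & kForwardedTag) != 0) return header->word & ~kForwardedTag;
  return target;
}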
2024 static void UpdatePointerToNewGen(HeapObject** p, HeapObject* object) { 2144 static void UpdatePointer(HeapObject** p, HeapObject* object) {
2025 ASSERT(HEAP->InFromSpace(object));
2026 ASSERT(*p == object); 2145 ASSERT(*p == object);
2027 2146
2028 Address old_addr = object->address(); 2147 Address old_addr = object->address();
2029 2148
2030 Address new_addr = Memory::Address_at(old_addr); 2149 Address new_addr = Memory::Address_at(old_addr);
2031 2150
2032 // The new space sweep will overwrite the map word of dead objects 2151 // The new space sweep will overwrite the map word of dead objects
2033 // with NULL. In this case we do not need to transfer this entry to 2152 // with NULL. In this case we do not need to transfer this entry to
2034 // the store buffer which we are rebuilding. 2153 // the store buffer which we are rebuilding.
2035 if (new_addr != NULL) { 2154 if (new_addr != NULL) {
2036 *p = HeapObject::FromAddress(new_addr); 2155 *p = HeapObject::FromAddress(new_addr);
2037 } else { 2156 } else {
2038 // We have to zap this pointer, because the store buffer may overflow later, 2157 // We have to zap this pointer, because the store buffer may overflow later,
2039 // and then we have to scan the entire heap and we don't want to find 2158 // and then we have to scan the entire heap and we don't want to find
2040 // spurious newspace pointers in the old space. 2159 // spurious newspace pointers in the old space.
2041 *p = HeapObject::FromAddress(NULL); // Fake heap object not in new space. 2160 *p = HeapObject::FromAddress(NULL); // Fake heap object not in new space.
2042 } 2161 }
2043 } 2162 }
2044 2163
2045 2164
2046 static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap, 2165 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2047 Object** p) { 2166 Object** p) {
2048 Address old_addr = HeapObject::cast(*p)->address(); 2167 MapWord map_word = HeapObject::cast(*p)->map_word();
2049 Address new_addr = Memory::Address_at(old_addr); 2168
2050 return String::cast(HeapObject::FromAddress(new_addr)); 2169 if (map_word.IsForwardingAddress()) {
2170 return String::cast(map_word.ToForwardingAddress());
2171 }
2172
2173 return String::cast(*p);
2051 } 2174 }
2052 2175
2053 2176
2054 static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) { 2177 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
2178 int object_size) {
2055 Object* result; 2179 Object* result;
2056 2180
2057 if (object_size > heap->MaxObjectSizeInPagedSpace()) { 2181 if (object_size > heap()->MaxObjectSizeInPagedSpace()) {
2058 MaybeObject* maybe_result = 2182 MaybeObject* maybe_result =
2059 heap->lo_space()->AllocateRawFixedArray(object_size); 2183 heap()->lo_space()->AllocateRawFixedArray(object_size);
2060 if (maybe_result->ToObject(&result)) { 2184 if (maybe_result->ToObject(&result)) {
2061 HeapObject* target = HeapObject::cast(result); 2185 HeapObject* target = HeapObject::cast(result);
2062 MigrateObject(heap, target->address(), object->address(), object_size, 2186 MigrateObject(target->address(),
2063 true); 2187 object->address(),
2064 heap->mark_compact_collector()->tracer()-> 2188 object_size,
2189 LO_SPACE);
2190 heap()->mark_compact_collector()->tracer()->
2065 increment_promoted_objects_size(object_size); 2191 increment_promoted_objects_size(object_size);
2066 return true; 2192 return true;
2067 } 2193 }
2068 } else { 2194 } else {
2069 OldSpace* target_space = heap->TargetSpace(object); 2195 OldSpace* target_space = heap()->TargetSpace(object);
2070 2196
2071 ASSERT(target_space == heap->old_pointer_space() || 2197 ASSERT(target_space == heap()->old_pointer_space() ||
2072 target_space == heap->old_data_space()); 2198 target_space == heap()->old_data_space());
2073 MaybeObject* maybe_result = target_space->AllocateRaw(object_size); 2199 MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
2074 if (maybe_result->ToObject(&result)) { 2200 if (maybe_result->ToObject(&result)) {
2075 HeapObject* target = HeapObject::cast(result); 2201 HeapObject* target = HeapObject::cast(result);
2076 MigrateObject(heap, 2202 MigrateObject(target->address(),
2077 target->address(),
2078 object->address(), 2203 object->address(),
2079 object_size, 2204 object_size,
2080 target_space == heap->old_pointer_space()); 2205 target_space->identity());
2081 heap->mark_compact_collector()->tracer()-> 2206 heap()->mark_compact_collector()->tracer()->
2082 increment_promoted_objects_size(object_size); 2207 increment_promoted_objects_size(object_size);
2083 return true; 2208 return true;
2084 } 2209 }
2085 } 2210 }
2086 2211
2087 return false; 2212 return false;
2088 } 2213 }
2089 2214
2090 2215
2091 void MarkCompactCollector::SweepNewSpace(NewSpace* space) { 2216 void MarkCompactCollector::EvacuateNewSpace() {
2092 heap_->CheckNewSpaceExpansionCriteria(); 2217 heap()->CheckNewSpaceExpansionCriteria();
2218
2219 NewSpace* new_space = heap()->new_space();
2093 2220
2094 // Store allocation range before flipping semispaces. 2221 // Store allocation range before flipping semispaces.
2095 Address from_bottom = space->bottom(); 2222 Address from_bottom = new_space->bottom();
2096 Address from_top = space->top(); 2223 Address from_top = new_space->top();
2097 2224
2098 // Flip the semispaces. After flipping, to space is empty, from space has 2225 // Flip the semispaces. After flipping, to space is empty, from space has
2099 // live objects. 2226 // live objects.
2100 space->Flip(); 2227 new_space->Flip();
2101 space->ResetAllocationInfo(); 2228 new_space->ResetAllocationInfo();
2102 2229
2103 int survivors_size = 0; 2230 int survivors_size = 0;
2104 2231
2105 // First pass: traverse all objects in inactive semispace, remove marks, 2232 // First pass: traverse all objects in inactive semispace, remove marks,
2106 // migrate live objects and write forwarding addresses. This stage puts 2233 // migrate live objects and write forwarding addresses. This stage puts
2107 // new entries in the store buffer and may cause some pages to be marked 2234 // new entries in the store buffer and may cause some pages to be marked
2108 // scan-on-scavenge. 2235 // scan-on-scavenge.
2109 SemiSpaceIterator from_it(from_bottom, from_top); 2236 SemiSpaceIterator from_it(from_bottom, from_top);
2110 for (HeapObject* object = from_it.Next(); 2237 for (HeapObject* object = from_it.Next();
2111 object != NULL; 2238 object != NULL;
2112 object = from_it.Next()) { 2239 object = from_it.Next()) {
2113 MarkBit mark_bit = Marking::MarkBitFrom(object); 2240 MarkBit mark_bit = Marking::MarkBitFrom(object);
2114 if (mark_bit.Get()) { 2241 if (mark_bit.Get()) {
2115 mark_bit.Clear(); 2242 mark_bit.Clear();
2116 heap_->mark_compact_collector()->tracer()->decrement_marked_count();
2117 2243
2118 int size = object->Size(); 2244 int size = object->Size();
2119 survivors_size += size; 2245 survivors_size += size;
2120 2246
2121 // Aggressively promote young survivors to the old space. 2247 // Aggressively promote young survivors to the old space.
2122 if (TryPromoteObject(heap_, object, size)) { 2248 if (TryPromoteObject(object, size)) {
2123 continue; 2249 continue;
2124 } 2250 }
2125 2251
2126 // Promotion failed. Just migrate object to another semispace. 2252 // Promotion failed. Just migrate object to another semispace.
2127 MaybeObject* allocation = space->AllocateRaw(size); 2253 MaybeObject* allocation = new_space->AllocateRaw(size);
2128 if (allocation->IsFailure()) { 2254 if (allocation->IsFailure()) {
2129 if (!space->AddFreshPage()) { 2255 if (!new_space->AddFreshPage()) {
2130 // Shouldn't happen. We are sweeping linearly, and to-space 2256 // Shouldn't happen. We are sweeping linearly, and to-space
2131 // has the same number of pages as from-space, so there is 2257 // has the same number of pages as from-space, so there is
2132 // always room. 2258 // always room.
2133 UNREACHABLE(); 2259 UNREACHABLE();
2134 } 2260 }
2135 allocation = space->AllocateRaw(size); 2261 allocation = new_space->AllocateRaw(size);
2136 ASSERT(!allocation->IsFailure()); 2262 ASSERT(!allocation->IsFailure());
2137 } 2263 }
2138 Object* target = allocation->ToObjectUnchecked(); 2264 Object* target = allocation->ToObjectUnchecked();
2139 MigrateObject(heap_, 2265
2140 HeapObject::cast(target)->address(), 2266 MigrateObject(HeapObject::cast(target)->address(),
2141 object->address(), 2267 object->address(),
2142 size, 2268 size,
2143 false); 2269 NEW_SPACE);
2144 } else { 2270 } else {
2145 // Process the dead object before we write a NULL into its header. 2271 // Process the dead object before we write a NULL into its header.
2146 LiveObjectList::ProcessNonLive(object); 2272 LiveObjectList::ProcessNonLive(object);
2147 2273
2148 // Mark dead objects in the new space with null in their map field. 2274 // Mark dead objects in the new space with null in their map field.
2149 Memory::Address_at(object->address()) = NULL; 2275 Memory::Address_at(object->address()) = NULL;
2150 } 2276 }
2151 } 2277 }
2152 2278
2279 heap_->IncrementYoungSurvivorsCounter(survivors_size);
2280 new_space->set_age_mark(new_space->top());
2281 }
2282
2283
2284 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
2285 AlwaysAllocateScope always_allocate;
2286
2287 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
2288
2289 MarkBit::CellType* cells = p->markbits()->cells();
2290
2291 int last_cell_index =
2292 Bitmap::IndexToCell(
2293 Bitmap::CellAlignIndex(
2294 p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
2295
2296 int cell_index = Page::kFirstUsedCell;
2297 Address cell_base = p->ObjectAreaStart();
2298 int offsets[16];
2299
2300 for (cell_index = Page::kFirstUsedCell;
2301 cell_index < last_cell_index;
2302 cell_index++, cell_base += 32 * kPointerSize) {
2303 ASSERT((unsigned)cell_index ==
2304 Bitmap::IndexToCell(
2305 Bitmap::CellAlignIndex(
2306 p->AddressToMarkbitIndex(cell_base))));
2307 if (cells[cell_index] == 0) continue;
2308
2309 int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
2310 for (int i = 0; i < live_objects; i++) {
2311 Address object_addr = cell_base + offsets[i] * kPointerSize;
2312 HeapObject* object = HeapObject::FromAddress(object_addr);
2313 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
2314
2315 int size = object->Size();
2316
2317 // This should never fail as we are in always allocate scope.
2318 Object* target = space->AllocateRaw(size)->ToObjectUnchecked();
2319
2320 MigrateObject(HeapObject::cast(target)->address(),
2321 object_addr,
2322 size,
2323 space->identity());
2324 ASSERT(object->map_word().IsForwardingAddress());
2325 }
2326 }
2327 }
2328
2329
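EvacuateLiveObjectsFromPage above walks the page's mark bitmap one 32-bit cell at a time; each cell covers 32 pointer-sized words, and MarkWordToObjectStarts turns the set bits of a cell into word offsets of live objects relative to cell_base. A plausible stand-in for that decoding step (not the real implementation):

#include <cstdint>

// Bit k of a markbit cell set means a live object starts at
// cell_base + k * kPointerSize.  Returns the number of offsets written.
int MarkWordToOffsets(uint32_t cell, int offsets[32]) {
  int count = 0;
  for (int bit = 0; bit < 32; bit++) {
    if ((cell & (1u << bit)) != 0) offsets[count++] = bit;
  }
  return count;
}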
2330 void MarkCompactCollector::EvacuatePages() {
2331 int npages = evacuation_candidates_.length();
2332 for (int i = 0; i < npages; i++) {
2333 Page* p = evacuation_candidates_[i];
2334 EvacuateLiveObjectsFromPage(p);
2335 }
2336 }
2337
2338
2339 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
2340 EvacuateNewSpace();
2341 EvacuatePages();
2342
2153 // Second pass: find pointers to new space and update them. 2343 // Second pass: find pointers to new space and update them.
2154 PointersToNewGenUpdatingVisitor updating_visitor(heap_); 2344 PointersUpdatingVisitor updating_visitor(heap());
2155 2345
2156 // Update pointers in to space. 2346 // Update pointers in to space.
2157 SemiSpaceIterator to_it(space->bottom(), space->top()); 2347 SemiSpaceIterator to_it(heap()->new_space()->bottom(),
2348 heap()->new_space()->top());
2158 for (HeapObject* object = to_it.Next(); 2349 for (HeapObject* object = to_it.Next();
2159 object != NULL; 2350 object != NULL;
2160 object = to_it.Next()) { 2351 object = to_it.Next()) {
2161 StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(), 2352 Map* map = object->map();
2162 object); 2353 object->IterateBody(map->instance_type(),
2354 object->SizeFromMap(map),
2355 &updating_visitor);
2163 } 2356 }
2164 2357
2165 // Update roots. 2358 // Update roots.
2166 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); 2359 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
2167 LiveObjectList::IterateElements(&updating_visitor); 2360 LiveObjectList::IterateElements(&updating_visitor);
2168 2361
2169 { 2362 {
2170 StoreBufferRebuildScope scope(heap_, 2363 StoreBufferRebuildScope scope(heap_,
2171 heap_->store_buffer(), 2364 heap_->store_buffer(),
2172 &Heap::ScavengeStoreBufferCallback); 2365 &Heap::ScavengeStoreBufferCallback);
2173 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointerToNewGen); 2366 heap_->store_buffer()->IteratePointersToNewSpace(
2367 &UpdatePointer, StoreBuffer::SKIP_SLOTS_IN_EVACUATION_CANDIDATES);
2174 } 2368 }
2369 slots_buffer_.Iterate(&updating_visitor);
2175 2370
2176 // Update pointers from cells. 2371 // Update pointers from cells.
2177 HeapObjectIterator cell_iterator(heap_->cell_space()); 2372 HeapObjectIterator cell_iterator(heap_->cell_space());
2178 for (HeapObject* cell = cell_iterator.Next(); 2373 for (HeapObject* cell = cell_iterator.Next();
2179 cell != NULL; 2374 cell != NULL;
2180 cell = cell_iterator.Next()) { 2375 cell = cell_iterator.Next()) {
2181 if (cell->IsJSGlobalPropertyCell()) { 2376 if (cell->IsJSGlobalPropertyCell()) {
2182 Address value_address = 2377 Address value_address =
2183 reinterpret_cast<Address>(cell) + 2378 reinterpret_cast<Address>(cell) +
2184 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); 2379 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
2185 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); 2380 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
2186 } 2381 }
2187 } 2382 }
2188 2383
2189 // Update pointer from the global contexts list. 2384 // Update pointer from the global contexts list.
2190 updating_visitor.VisitPointer(heap_->global_contexts_list_address()); 2385 updating_visitor.VisitPointer(heap_->global_contexts_list_address());
2191 2386
2387 heap_->symbol_table()->Iterate(&updating_visitor);
2388
2192 // Update pointers from external string table. 2389 // Update pointers from external string table.
2193 heap_->UpdateNewSpaceReferencesInExternalStringTable( 2390 heap_->UpdateReferencesInExternalStringTable(
2194 &UpdateNewSpaceReferenceInExternalStringTableEntry); 2391 &UpdateReferenceInExternalStringTableEntry);
2195
2196 // All pointers were updated. Update auxiliary allocation info.
2197 heap_->IncrementYoungSurvivorsCounter(survivors_size);
2198 space->set_age_mark(space->top());
2199 2392
2200 // Update JSFunction pointers from the runtime profiler. 2393 // Update JSFunction pointers from the runtime profiler.
2201 heap_->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge(); 2394 heap_->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
2395
2396 #ifdef DEBUG
2397 VerifyEvacuation(heap_);
2398 #endif
2399
2400 int npages = evacuation_candidates_.length();
2401 for (int i = 0; i < npages; i++) {
2402 Page* p = evacuation_candidates_[i];
2403 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
2404 space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
2405 p->set_scan_on_scavenge(false);
2406 // We are not clearing the evacuation candidate flag here
2407 // because it is required to notify the lazy sweeper to skip
2408 // these pages.
Erik Corry 2011/06/20 20:41:26 this -> these
2409 }
2202 } 2410 }
2203 2411
2204 2412
2205 INLINE(static uint32_t SweepFree(PagedSpace* space, 2413 INLINE(static uint32_t SweepFree(PagedSpace* space,
2206 Page* p, 2414 Page* p,
2207 uint32_t free_start, 2415 uint32_t free_start,
2208 uint32_t region_end, 2416 uint32_t region_end,
2209 uint32_t* cells)); 2417 uint32_t* cells));
2210 2418
2211 2419
(...skipping 306 matching lines...)
2518 2726
2519 2727
2520 // Sweeps a space conservatively. After this has been done the larger free 2728 // Sweeps a space conservatively. After this has been done the larger free
2521 // spaces have been put on the free list and the smaller ones have been 2729 // spaces have been put on the free list and the smaller ones have been
2522 // ignored and left untouched. A free space is always either ignored or put 2730 // ignored and left untouched. A free space is always either ignored or put
2523 // on the free list, never split up into two parts. This is important 2731 // on the free list, never split up into two parts. This is important
2524 // because it means that any FreeSpace maps left actually describe a region of 2732 // because it means that any FreeSpace maps left actually describe a region of
2525 // memory that can be ignored when scanning. Dead objects other than free 2733 // memory that can be ignored when scanning. Dead objects other than free
2526 // spaces will not contain the free space map. 2734 // spaces will not contain the free space map.
2527 int MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { 2735 int MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
2736 // We might start advancing sweeper before evacuation happened.
2737 if (p->IsEvacuationCandidate()) return 0;
2738
2528 int freed_bytes = 0; 2739 int freed_bytes = 0;
2529 2740
2530 MarkBit::CellType* cells = p->markbits()->cells(); 2741 MarkBit::CellType* cells = p->markbits()->cells();
2531 2742
2532 p->SetFlag(MemoryChunk::WAS_SWEPT_CONSERVATIVELY); 2743 p->SetFlag(MemoryChunk::WAS_SWEPT_CONSERVATIVELY);
2533 2744
2534 // This is the start of the 32 word block that we are currently looking at. 2745 // This is the start of the 32 word block that we are currently looking at.
2535 Address block_address = p->ObjectAreaStart(); 2746 Address block_address = p->ObjectAreaStart();
2536 2747
2537 int last_cell_index = 2748 int last_cell_index =
(...skipping 121 matching lines...)
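Since the body of SweepConservatively is elided in this view, here is a small standalone sketch of the policy its comment block describes: scan the page's live map and put on the free list only those free runs that are long enough, never splitting a run and simply ignoring short ones. The types and the size threshold are illustrative assumptions, not the real implementation:

#include <utility>
#include <vector>

static const int kMinFreeRunWords = 4;  // assumed minimum size worth free-listing

// Returns (start_index, length) pairs, in words, of the free runs that would
// be put on the free list; shorter runs are left untouched.
std::vector<std::pair<int, int> > FindFreeRuns(const std::vector<bool>& live) {
  std::vector<std::pair<int, int> > runs;
  int run_start = -1;
  int n = static_cast<int>(live.size());
  for (int i = 0; i <= n; i++) {
    bool is_live = (i == n) || live[i];
    if (!is_live) {
      if (run_start < 0) run_start = i;     // open a free run
    } else if (run_start >= 0) {
      int length = i - run_start;           // close the run
      if (length >= kMinFreeRunWords) runs.push_back(std::make_pair(run_start, length));
      run_start = -1;
    }
  }
  return runs;
}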
2659 space->ClearStats(); 2870 space->ClearStats();
2660 2871
2661 PageIterator it(space); 2872 PageIterator it(space);
2662 2873
2663 int freed_bytes = 0; 2874 int freed_bytes = 0;
2664 int newspace_size = space->heap()->new_space()->Size(); 2875 int newspace_size = space->heap()->new_space()->Size();
2665 2876
2666 while (it.has_next()) { 2877 while (it.has_next()) {
2667 Page* p = it.next(); 2878 Page* p = it.next();
2668 2879
2880 if (p->IsEvacuationCandidate()) {
2881 ASSERT(evacuation_candidates_.length() > 0);
2882 continue;
2883 }
2884
2669 switch (sweeper) { 2885 switch (sweeper) {
2670 case CONSERVATIVE: 2886 case CONSERVATIVE: {
2671 SweepConservatively(space, p); 2887 SweepConservatively(space, p);
2672 break; 2888 break;
2889 }
2673 case LAZY_CONSERVATIVE: 2890 case LAZY_CONSERVATIVE:
2674 freed_bytes += SweepConservatively(space, p); 2891 freed_bytes += SweepConservatively(space, p);
2675 // TODO(gc): tweak the heuristic. 2892 // TODO(gc): tweak the heuristic.
2676 if (freed_bytes >= newspace_size && p != space->LastPage()) { 2893 if (freed_bytes >= newspace_size && p != space->LastPage()) {
2677 space->SetPagesToSweep(p->next_page(), space->LastPage()); 2894 space->SetPagesToSweep(p->next_page(), space->LastPage());
2678 return; 2895 return;
2679 } 2896 }
2680 break; 2897 break;
2681 case PRECISE: 2898 case PRECISE:
2682 SweepPrecisely(space, p); 2899 SweepPrecisely(space, p);
2683 break; 2900 break;
2684 default: 2901 default:
2685 UNREACHABLE(); 2902 UNREACHABLE();
2686 } 2903 }
2687 } 2904 }
2688 2905
2689 // TODO(gc): set up allocation top and limit using the free list. 2906 // TODO(gc): set up allocation top and limit using the free list.
2690 } 2907 }
2691 2908
2692 2909
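The LAZY_CONSERVATIVE case above sweeps pages eagerly only until the bytes freed cover roughly one new-space worth of allocation, then hands the remaining pages to the lazy sweeper (space->SetPagesToSweep), never deferring past the last page. A sketch of that threshold heuristic with stand-in types:

#include <cstddef>
#include <vector>

struct PageInfo { int reclaimable_bytes; };  // stand-in for a Page

// Returns the index of the first page left for lazy sweeping, or pages.size()
// if every page was swept eagerly.
size_t SweepUntilEnoughFreed(const std::vector<PageInfo>& pages, int new_space_size) {
  int freed_bytes = 0;
  for (size_t i = 0; i < pages.size(); i++) {
    freed_bytes += pages[i].reclaimable_bytes;       // SweepConservatively(space, p)
    if (freed_bytes >= new_space_size && i + 1 < pages.size()) {
      return i + 1;                                  // defer the rest to the lazy sweeper
    }
  }
  return pages.size();
}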
2693 void MarkCompactCollector::SweepSpaces() { 2910 void MarkCompactCollector::SweepSpaces() {
2694 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); 2911 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
2695 #ifdef DEBUG 2912 #ifdef DEBUG
2696 state_ = SWEEP_SPACES; 2913 state_ = SWEEP_SPACES;
2697 #endif 2914 #endif
2698
2699 ASSERT(!IsCompacting());
2700 SweeperType how_to_sweep = 2915 SweeperType how_to_sweep =
2701 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; 2916 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
2702 if (sweep_precisely_) how_to_sweep = PRECISE; 2917 if (sweep_precisely_) how_to_sweep = PRECISE;
2703 // Noncompacting collections simply sweep the spaces to clear the mark 2918 // Noncompacting collections simply sweep the spaces to clear the mark
2704 // bits and free the nonlive blocks (for old and map spaces). We sweep 2919 // bits and free the nonlive blocks (for old and map spaces). We sweep
2705 // the map space last because freeing non-live maps overwrites them and 2920 // the map space last because freeing non-live maps overwrites them and
2706 // the other spaces rely on possibly non-live maps to get the sizes for 2921 // the other spaces rely on possibly non-live maps to get the sizes for
2707 // non-live objects. 2922 // non-live objects.
2708 SweepSpace(heap()->old_pointer_space(), how_to_sweep); 2923 SweepSpace(heap()->old_pointer_space(), how_to_sweep);
2709 SweepSpace(heap()->old_data_space(), how_to_sweep); 2924 SweepSpace(heap()->old_data_space(), how_to_sweep);
2710 SweepSpace(heap()->code_space(), PRECISE); 2925 SweepSpace(heap()->code_space(), PRECISE);
2711 // TODO(gc): implement specialized sweeper for cell space. 2926 // TODO(gc): implement specialized sweeper for cell space.
2712 SweepSpace(heap()->cell_space(), PRECISE); 2927 SweepSpace(heap()->cell_space(), PRECISE);
2713 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); 2928 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
2714 SweepNewSpace(heap_->new_space()); 2929 EvacuateNewSpaceAndCandidates();
2715 } 2930 }
2716 // TODO(gc): ClearNonLiveTransitions depends on precise sweeping of 2931 // TODO(gc): ClearNonLiveTransitions depends on precise sweeping of
2717 // map space to detect whether unmarked map became dead in this 2932 // map space to detect whether unmarked map became dead in this
2718 // collection or in one of the previous ones. 2933 // collection or in one of the previous ones.
2719 // TODO(gc): Implement specialized sweeper for map space. 2934 // TODO(gc): Implement specialized sweeper for map space.
2720 SweepSpace(heap()->map_space(), PRECISE); 2935 SweepSpace(heap()->map_space(), PRECISE);
2721 2936
2722 ASSERT(live_map_objects_size_ <= heap()->map_space()->Size()); 2937 ASSERT(live_map_objects_size_ <= heap()->map_space()->Size());
2723 2938
2724 // Deallocate unmarked objects and clear marked bits for marked objects. 2939 // Deallocate unmarked objects and clear marked bits for marked objects.
(...skipping 87 matching lines...)
2812 #endif 3027 #endif
2813 #ifdef ENABLE_LOGGING_AND_PROFILING 3028 #ifdef ENABLE_LOGGING_AND_PROFILING
2814 if (obj->IsCode()) { 3029 if (obj->IsCode()) {
2815 PROFILE(isolate, CodeDeleteEvent(obj->address())); 3030 PROFILE(isolate, CodeDeleteEvent(obj->address()));
2816 } 3031 }
2817 #endif 3032 #endif
2818 } 3033 }
2819 3034
2820 3035
2821 void MarkCompactCollector::Initialize() { 3036 void MarkCompactCollector::Initialize() {
2822 StaticPointersToNewGenUpdatingVisitor::Initialize();
2823 StaticMarkingVisitor::Initialize(); 3037 StaticMarkingVisitor::Initialize();
2824 } 3038 }
2825 3039
2826 3040
3041 SlotsBuffer::SlotsBuffer()
3042 : buffers_(0),
3043 buffer_(NULL),
3044 idx_(kBufferSize),
3045 buffer_idx_(-1) {
3046 }
3047
3048
3049 SlotsBuffer::~SlotsBuffer() {
3050 for (int buffer_index = 0; buffer_index < buffers_.length(); ++buffer_index) {
3051 delete buffers_[buffer_index];
3052 }
3053 }
3054
3055
3056 void SlotsBuffer::Clear() {
3057 idx_ = kBufferSize;
3058 buffer_idx_ = -1;
3059 }
3060
3061
3062 void SlotsBuffer::Add(Object** slot) {
3063 if (idx_ == kBufferSize) {
3064 idx_ = 0;
3065 buffer_idx_++;
3066 if (buffer_idx_ == buffers_.length()) {
3067 buffers_.Add(new ObjectSlot[kBufferSize]);
3068 }
3069 buffer_ = buffers_[buffer_idx_];
3070 }
3071
3072 buffer_[idx_++] = slot;
3073 }
3074
3075
3076 void SlotsBuffer::Iterate(ObjectVisitor* visitor) {
3077 if (buffer_idx_ < 0) return;
3078
3079 for (int buffer_index = 0; buffer_index < buffer_idx_; ++buffer_index) {
3080 ObjectSlot* buffer = buffers_[buffer_index];
3081 for (int slot_idx = 0; slot_idx < kBufferSize; ++slot_idx) {
3082 visitor->VisitPointer(buffer[slot_idx]);
3083 }
3084 }
3085
3086 ObjectSlot* last_buffer = buffers_[buffer_idx_];
3087 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
3088 visitor->VisitPointer(last_buffer[slot_idx]);
3089 }
3090 }
3091
3092
3093 void SlotsBuffer::Report() {
3094 }
3095
3096
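For readers skimming the diff, the new SlotsBuffer boils down to a grow-only list of fixed-size chunks: chunks are allocated lazily, kept across Clear() so the next collection can reuse them, and visited in insertion order. A compact standalone analogue (plain int* slots instead of Object**, and a tiny chunk size chosen only for illustration):

#include <cstddef>
#include <vector>

class ChunkedSlots {
 public:
  ChunkedSlots() : idx_(kChunkSize), chunk_idx_(-1) {}
  ~ChunkedSlots() {
    for (size_t i = 0; i < chunks_.size(); ++i) delete[] chunks_[i];
  }

  void Clear() { idx_ = kChunkSize; chunk_idx_ = -1; }  // chunks stay allocated for reuse

  void Add(int* slot) {
    if (idx_ == kChunkSize) {                           // current chunk full (or none yet)
      idx_ = 0;
      ++chunk_idx_;
      if (chunk_idx_ == static_cast<int>(chunks_.size())) {
        chunks_.push_back(new int*[kChunkSize]);
      }
    }
    chunks_[chunk_idx_][idx_++] = slot;
  }

  // Visit every recorded slot in insertion order; mirrors SlotsBuffer::Iterate.
  template <typename Visitor>
  void ForEach(Visitor visit) {
    if (chunk_idx_ < 0) return;
    for (int c = 0; c < chunk_idx_; ++c) {
      for (int i = 0; i < kChunkSize; ++i) visit(chunks_[c][i]);
    }
    for (int i = 0; i < idx_; ++i) visit(chunks_[chunk_idx_][i]);
  }

 private:
  static const int kChunkSize = 4;  // illustration only; the real kBufferSize is larger
  std::vector<int**> chunks_;
  int idx_;
  int chunk_idx_;
};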
2827 } } // namespace v8::internal 3097 } } // namespace v8::internal