OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 28 matching lines...) |
39 namespace v8 { | 39 namespace v8 { |
40 namespace internal { | 40 namespace internal { |
41 | 41 |
42 // ------------------------------------------------------------------------- | 42 // ------------------------------------------------------------------------- |
43 // MarkCompactCollector | 43 // MarkCompactCollector |
44 | 44 |
45 bool MarkCompactCollector::force_compaction_ = false; | 45 bool MarkCompactCollector::force_compaction_ = false; |
46 bool MarkCompactCollector::compacting_collection_ = false; | 46 bool MarkCompactCollector::compacting_collection_ = false; |
47 bool MarkCompactCollector::compact_on_next_gc_ = false; | 47 bool MarkCompactCollector::compact_on_next_gc_ = false; |
48 | 48 |
49 int MarkCompactCollector::previous_marked_count_ = 0; | |
50 GCTracer* MarkCompactCollector::tracer_ = NULL; | 49 GCTracer* MarkCompactCollector::tracer_ = NULL; |
51 | 50 |
52 | 51 |
53 #ifdef DEBUG | 52 #ifdef DEBUG |
54 MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE; | 53 MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE; |
55 | 54 |
56 // Counters used for debugging the marking phase of mark-compact or mark-sweep | 55 // Counters used for debugging the marking phase of mark-compact or mark-sweep |
57 // collection. | 56 // collection. |
58 int MarkCompactCollector::live_bytes_ = 0; | 57 int MarkCompactCollector::live_bytes_ = 0; |
59 int MarkCompactCollector::live_young_objects_size_ = 0; | 58 int MarkCompactCollector::live_young_objects_size_ = 0; |
60 int MarkCompactCollector::live_old_data_objects_size_ = 0; | 59 int MarkCompactCollector::live_old_data_objects_size_ = 0; |
61 int MarkCompactCollector::live_old_pointer_objects_size_ = 0; | 60 int MarkCompactCollector::live_old_pointer_objects_size_ = 0; |
62 int MarkCompactCollector::live_code_objects_size_ = 0; | 61 int MarkCompactCollector::live_code_objects_size_ = 0; |
63 int MarkCompactCollector::live_map_objects_size_ = 0; | 62 int MarkCompactCollector::live_map_objects_size_ = 0; |
64 int MarkCompactCollector::live_cell_objects_size_ = 0; | 63 int MarkCompactCollector::live_cell_objects_size_ = 0; |
65 int MarkCompactCollector::live_lo_objects_size_ = 0; | 64 int MarkCompactCollector::live_lo_objects_size_ = 0; |
66 #endif | 65 #endif |
67 | 66 |
| 67 // Assert an invariant that must hold if marking is done by a single thread. |
| 68 #define MARKING_INVARIANT_ST(cond) \ |
| 69 ASSERT(MarkCompactCollector::NumberOfMarkers() != 1 || (cond)) |
| 70 |
| 71 // Assert an invariant that must hold if marking is done by multiple threads. |
| 72 #define MARKING_INVARIANT_MT(cond) \ |
| 73 ASSERT(MarkCompactCollector::NumberOfMarkers() == 1 || (cond)) |
68 | 74 |
69 void MarkCompactCollector::CollectGarbage() { | 75 void MarkCompactCollector::CollectGarbage() { |
70 // Make sure that Prepare() has been called. The individual steps below will | 76 // Make sure that Prepare() has been called. The individual steps below will |
71 // update the state as they proceed. | 77 // update the state as they proceed. |
72 ASSERT(state_ == PREPARE_GC); | 78 ASSERT(state_ == PREPARE_GC); |
73 | 79 |
74 // Prepare has selected whether to compact the old generation or not. | 80 // Prepare has selected whether to compact the old generation or not. |
75 // Tell the tracer. | 81 // Tell the tracer. |
76 if (IsCompacting()) tracer_->set_is_compacting(); | 82 if (IsCompacting()) tracer_->set_is_compacting(); |
77 | 83 |
(...skipping 13 matching lines...) |
91 PcToCodeCache::FlushPcToCodeCache(); | 97 PcToCodeCache::FlushPcToCodeCache(); |
92 | 98 |
93 RelocateObjects(); | 99 RelocateObjects(); |
94 } else { | 100 } else { |
95 SweepSpaces(); | 101 SweepSpaces(); |
96 PcToCodeCache::FlushPcToCodeCache(); | 102 PcToCodeCache::FlushPcToCodeCache(); |
97 } | 103 } |
98 | 104 |
99 Finish(); | 105 Finish(); |
100 | 106 |
101 // Save the count of marked objects remaining after the collection and | |
102 // null out the GC tracer. | |
103 previous_marked_count_ = tracer_->marked_count(); | |
104 ASSERT(previous_marked_count_ == 0); | |
105 tracer_ = NULL; | 107 tracer_ = NULL; |
106 } | 108 } |
107 | 109 |
108 | 110 |
109 void MarkCompactCollector::Prepare(GCTracer* tracer) { | 111 void MarkCompactCollector::Prepare(GCTracer* tracer) { |
110 // Rather than passing the tracer around we stash it in a static member | 112 // Rather than passing the tracer around we stash it in a static member |
111 // variable. | 113 // variable. |
112 tracer_ = tracer; | 114 tracer_ = tracer; |
113 | 115 |
114 #ifdef DEBUG | 116 #ifdef DEBUG |
(...skipping 60 matching lines...) |
175 old_gen_recoverable += space->Waste() + space->AvailableFree(); | 177 old_gen_recoverable += space->Waste() + space->AvailableFree(); |
176 old_gen_used += space->Size(); | 178 old_gen_used += space->Size(); |
177 } | 179 } |
178 | 180 |
179 int old_gen_fragmentation = | 181 int old_gen_fragmentation = |
180 static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used); | 182 static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used); |
181 if (old_gen_fragmentation > kFragmentationLimit && | 183 if (old_gen_fragmentation > kFragmentationLimit && |
182 old_gen_recoverable > kFragmentationAllowed) { | 184 old_gen_recoverable > kFragmentationAllowed) { |
183 compact_on_next_gc_ = true; | 185 compact_on_next_gc_ = true; |
184 } | 186 } |
| 187 |
| 188 MARKING_INVARIANT_ST(tracer_->marked_count() == 0); |
| 189 MARKING_INVARIANT_MT(tracer_->marked_count() >= 0); |
185 } | 190 } |
186 | 191 |
187 | 192 |
188 // ------------------------------------------------------------------------- | 193 // ------------------------------------------------------------------------- |
189 // Phase 1: tracing and marking live objects. | 194 // Phase 1: tracing and marking live objects. |
190 // before: all objects are in normal state. | 195 // before: all objects are in normal state. |
191 // after: a live object's map pointer is marked as '00'. | 196 // after: a live object's map pointer is marked as '00'. |
192 | 197 |
193 // Marking all live objects in the heap as part of mark-sweep or mark-compact | 198 // Marking all live objects in the heap as part of mark-sweep or mark-compact |
194 // collection. Before marking, all objects are in their normal state. After | 199 // collection. Before marking, all objects are in their normal state. After |
(...skipping 11 matching lines...) |
206 // overflow flag. When the overflow flag is set, we continue marking objects | 211 // overflow flag. When the overflow flag is set, we continue marking objects |
207 // reachable from the objects on the marking stack, but no longer push them on | 212 // reachable from the objects on the marking stack, but no longer push them on |
208 // the marking stack. Instead, we mark them as both marked and overflowed. | 213 // the marking stack. Instead, we mark them as both marked and overflowed. |
209 // When the stack is in the overflowed state, objects marked as overflowed | 214 // When the stack is in the overflowed state, objects marked as overflowed |
210 // have been reached and marked but their children have not been visited yet. | 215 // have been reached and marked but their children have not been visited yet. |
211 // After emptying the marking stack, we clear the overflow flag and traverse | 216 // After emptying the marking stack, we clear the overflow flag and traverse |
212 // the heap looking for objects marked as overflowed, push them on the stack, | 217 // the heap looking for objects marked as overflowed, push them on the stack, |
213 // and continue with marking. This process repeats until all reachable | 218 // and continue with marking. This process repeats until all reachable |
214 // objects have been marked. | 219 // objects have been marked. |
215 | 220 |
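To summarize the states described above (tri-color terminology is editorial shorthand; the actual encoding lives in the map word):

    unmarked               -> white: not yet proven reachable
    marked, on the stack   -> grey:  reachable, body still to be scanned
    marked and overflowed  -> grey:  reachable, body must be rediscovered by
                                     rescanning the heap
    marked, body scanned   -> black: fully processed
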
216 static MarkingStack marking_stack; | 221 // Lock-free, wait-free, thread-safe, fixed-size deque with work-stealing support. |
| 222 // |
| 223 // Each marker thread owns one such deque. The owner uses the PushBottom and |
| 224 // PopBottom methods to populate and consume the deque. Other threads can use |
| 225 // the PopTop method to steal objects from the deque. Synchronization is |
| 226 // required only when popping the last element or stealing from the deque. |
| 227 // |
| 228 // This deque was first described in "Thread Scheduling for Multiprogrammed |
| 229 // Multiprocessors" by Arora et al. |
| 230 // |
| 231 // The deque does not own a fixed backing store; instead, on each full GC a |
| 232 // part of from space is attached to it as its backing store. |
| 233 class Deque { |
| 234 public: |
| 235 |
| 236 // Prepare the deque for use in a GC cycle: set the memory region between |
| 237 // low and high as the deque's backing store. |
| 238 void Initialize(Address low, Address high) { |
| 239 base_ = reinterpret_cast<HeapObject* volatile*>(low); |
| 240 size_ = (high - low) / sizeof(HeapObject*); |
| 241 |
| 242 bot_ = 0; |
| 243 tagged_top_ = TaggedTop(0, 0).encode(); |
| 244 } |
| 245 |
| 246 inline void PushBottom(HeapObject* obj) { |
| 247 uint32_t curr_bot = bot_; |
| 248 base_[curr_bot++] = obj; |
| 249 bot_ = curr_bot; |
| 250 } |
| 251 |
| 252 inline HeapObject* PopBottom() { |
| 253 uint32_t curr_bot = bot_; |
| 254 |
| 255 if (curr_bot == 0) return NULL; |
| 256 |
| 257 curr_bot--; |
| 258 |
| 259 bot_ = curr_bot; |
| 260 |
| 261 HeapObject* obj = base_[curr_bot]; |
| 262 TaggedTop old_tagged_top(tagged_top_); |
| 263 |
| 264 if (curr_bot > old_tagged_top.top()) return obj; |
| 265 |
| 266 bot_ = 0; |
| 267 |
| 268 TaggedTop new_tagged_top(0, old_tagged_top.tag() + 1); |
| 269 |
| 270 if (curr_bot == old_tagged_top.top()) { |
| 271 if (AtomicCompareAndSwap(&tagged_top_, |
| 272 old_tagged_top.encode(), |
| 273 new_tagged_top.encode())) { |
| 274 return obj; |
| 275 } |
| 276 } |
| 277 tagged_top_ = new_tagged_top.encode(); |
| 278 return NULL; |
| 279 } |
| 280 |
| 281 inline HeapObject* PopTop() { |
| 282 TaggedTop old_tagged_top(tagged_top_); |
| 283 uint32_t curr_bot = bot_; |
| 284 |
| 285 if (curr_bot <= old_tagged_top.top()) return NULL; |
| 286 HeapObject* obj = base_[old_tagged_top.top()]; |
| 287 |
| 288 TaggedTop new_tagged_top(old_tagged_top.top() + 1, old_tagged_top.tag()); |
| 289 |
| 290 if (AtomicCompareAndSwap(&tagged_top_, |
| 291 old_tagged_top.encode(), |
| 292 new_tagged_top.encode())) { |
| 293 return obj; |
| 294 } |
| 295 |
| 296 return NULL; |
| 297 } |
| 298 |
| 299 inline bool IsFull() { return bot_ == size_; } |
| 300 inline bool IsEmpty() { return bot_ == 0; } |
| 301 |
| 302 private: |
| 303 class TaggedTop { |
| 304 public: |
| 305 explicit TaggedTop(AtomicWord encoded) |
| 306 : top_(static_cast<uint32_t>(encoded & kEncodedTopMask)), |
| 307 tag_(static_cast<uint32_t>(encoded >> kEncodedTagShift)) { |
| 308 ASSERT(encode() == encoded); |
| 309 } |
| 310 |
| 311 TaggedTop(uint32_t top, uint32_t tag) |
| 312 : top_(top), tag_(tag) { |
| 313 } |
| 314 |
| 315 |
| 316 TaggedTop(const TaggedTop& val) : top_(val.top_), tag_(val.tag_) { } |
| 317 |
| 318 inline uint32_t top() { return top_; } |
| 319 |
| 320 inline uint32_t tag() { return tag_; } |
| 321 |
| 322 inline AtomicWord encode() const { |
| 323 return (static_cast<AtomicWord>(top_) & kEncodedTopMask) | |
| 324 (static_cast<AtomicWord>(tag_) << kEncodedTagShift); |
| 325 } |
| 326 |
| 327 |
| 328 private: |
| 329 #ifdef V8_TARGET_ARCH_X64 |
| 330 static const int kTopBits = 32; |
| 331 #else |
| 332 static const int kTopBits = 21; |
| 333 #endif |
| 334 |
| 335 static const AtomicWord kEncodedTopMask = |
| 336 (static_cast<AtomicWord>(1) << kTopBits) - 1; |
| 337 |
| 338 static const int kEncodedTagShift = kTopBits; |
| 339 |
| 340 uint32_t top_; |
| 341 uint32_t tag_; |
| 342 }; |
| 343 |
| 344 volatile AtomicWord tagged_top_; |
| 345 volatile Atomic32 bot_; |
| 346 |
| 347 HeapObject* volatile* base_; |
| 348 Atomic32 size_; |
| 349 }; |
| 350 |
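A minimal usage sketch of the deque's owner/thief contract (backing_low, backing_high and obj are illustrative names, not part of this change):

    Deque q;
    q.Initialize(backing_low, backing_high);  // region carved out of from space

    // Owner thread: LIFO access through the bottom end; no CAS on the fast path.
    if (!q.IsFull()) q.PushBottom(obj);
    HeapObject* mine = q.PopBottom();  // NULL when empty or when the race for
                                       // the last element is lost

    // Thief thread: steals the oldest entry from the top end with a single CAS.
    HeapObject* stolen = q.PopTop();   // NULL when empty or when the CAS fails

The tag half of TaggedTop is what keeps that CAS safe: PopBottom bumps the tag whenever it resets the deque, so a thief holding a stale (top, tag) pair cannot install it again; this is the usual ABA guard in Arora-style deques.
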
| 351 class Marker; |
| 352 |
| 353 // Visitor class for marking heap roots. |
| 354 class RootMarkingVisitor : public ObjectVisitor { |
| 355 public: |
| 356 explicit RootMarkingVisitor(Marker* marker) : marker_(marker) {} |
| 357 |
| 358 void VisitPointer(Object** p) { |
| 359 MarkObjectByPointer(p); |
| 360 } |
| 361 |
| 362 void VisitPointers(Object** start, Object** end) { |
| 363 for (Object** p = start; p < end; p++) MarkObjectByPointer(p); |
| 364 } |
| 365 |
| 366 private: |
| 367 void MarkObjectByPointer(Object** p); |
| 368 |
| 369 Marker* marker_; |
| 370 }; |
| 371 |
| 372 // Random number generator using George Marsaglia's MWC algorithm. |
| 373 class Random { |
| 374 public: |
| 375 Random() : hi_(0), lo_(0) {} |
| 376 |
| 377 uint32_t Next() { |
| 378 // Initialize seed using the system random(). If one of the seeds |
| 379 // should ever become zero again, or if random() returns zero, we |
| 380 // avoid getting stuck with zero bits in hi or lo by re-initializing |
| 381 // them on demand. |
| 382 if (hi_ == 0) hi_ = V8::Random(); |
| 383 if (lo_ == 0) lo_ = V8::Random(); |
| 384 |
| 385 // Mix the bits. |
| 386 hi_ = 36969 * (hi_ & 0xFFFF) + (hi_ >> 16); |
| 387 lo_ = 18273 * (lo_ & 0xFFFF) + (lo_ >> 16); |
| 388 return (hi_ << 16) + (lo_ & 0xFFFF); |
| 389 } |
| 390 |
| 391 private: |
| 392 uint32_t hi_; |
| 393 uint32_t lo_; |
| 394 }; |
| 395 |
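As a quick sanity check of the mixing step, suppose both seed words happen to be 1 (purely illustrative values):

    hi_ = 36969 * (1 & 0xFFFF) + (1 >> 16) == 36969
    lo_ = 18273 * (1 & 0xFFFF) + (1 >> 16) == 18273
    Next() == (36969 << 16) + (18273 & 0xFFFF) == 2422818657
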
| 396 // This class encapsulates the strategy for work-stealing victim selection. |
| 397 class VictimSelector { |
| 398 public: |
| 399 VictimSelector(int owner_id, int count) : owner_id_(owner_id), count_(count) { |
| 400 } |
| 401 |
| 402 int Next() { |
| 403 // When there are only two markers, the other marker is the only victim. |
| 404 if (count_ == 2) return 1 - owner_id_; |
| 405 |
| 406 return random_.Next() % count_; |
| 407 } |
| 408 |
| 409 private: |
| 410 int owner_id_; |
| 411 int count_; |
| 412 |
| 413 Random random_; |
| 414 }; |
| 415 |
| 416 // This class represents a worker performing the actual marking of heap |
| 417 // objects. One of the markers (returned by the method Marker::Sequential()) |
| 418 // is considered the main one. Its methods can only be called from the main |
| 419 // thread, and its id is equal to kMainThreadMarkerId. |
| 420 // Only this marker is required for the marking phase to succeed. |
| 421 // Other markers are considered auxiliary. Each of the auxiliary markers |
| 422 // owns its own thread. |
| 423 class Marker : public Thread { |
| 424 public: |
| 425 explicit Marker(int id); |
| 426 |
| 427 // Try marking the given unmarked object. If marking succeeds, push the |
| 428 // object into the deque owned by this marker. |
| 429 // There is no synchronization and there are no memory barriers in |
| 430 // HeapObject::SetMark, so two threads competing for the same object might |
| 431 // both succeed in claiming it. |
| 432 INLINE(void ClaimObject(HeapObject* object)) { |
| 433 if (SetMark(object)) Push(object); |
| 434 ASSERT(object->IsMarked()); |
| 435 } |
| 436 |
| 437 // Push the given object into the deque owned by this marker. |
| 438 // If there is no space left, mark the object as overflowed and |
| 439 // notify other markers about the presence of overflowed objects in the heap. |
| 440 // This method is not thread safe. It is intended to be used either by the |
| 441 // marker itself or when no markers are running. |
| 442 INLINE(void Push(HeapObject* object)) { |
| 443 if (!q_.IsFull()) { |
| 444 q_.PushBottom(object); |
| 445 } else { |
| 446 object->SetOverflow(); |
| 447 SetOverflowedState(); |
| 448 } |
| 449 } |
| 450 |
| 451 // Try marking the given object. |
| 452 // There is no synchronization and there are no memory barriers in |
| 453 // HeapObject::SetMark, so two threads competing for the same object might |
| 454 // both succeed in claiming it. |
| 455 MUST_USE_RESULT INLINE(bool SetMark(HeapObject* obj)) { |
| 456 if (obj->SetMark()) { |
| 457 #ifdef DEBUG |
| 458 IncrementMarkedCount(); |
| 459 MarkCompactCollector::UpdateLiveObjectCount(obj); |
| 460 #endif |
| 461 return true; |
| 462 } |
| 463 return false; |
| 464 } |
| 465 |
| 466 // Try marking the given object exclusively. If several threads are trying |
| 467 // to mark the same unmarked object exclusively, only one will succeed. |
| 468 MUST_USE_RESULT INLINE(bool SetMarkExclusively(HeapObject* obj)) { |
| 469 if (obj->SetMarkExclusively()) { |
| 470 #ifdef DEBUG |
| 471 IncrementMarkedCount(); |
| 472 MarkCompactCollector::UpdateLiveObjectCount(obj); |
| 473 #endif |
| 474 return true; |
| 475 } |
| 476 return false; |
| 477 } |
| 478 |
| 479 // Scan the body of the object for pointers. |
| 480 void ProcessObject(HeapObject* object); |
| 481 |
| 482 // Transitively mark all objects reachable from those pushed into the deque. |
| 483 // Might leave overflowed objects in the heap. |
| 484 void TransitiveClosure(); |
| 485 |
| 486 // Scan the heap for objects marked as overflowed and mark objects reachable |
| 487 // from them until there are no overflowed objects left in the heap. |
| 488 void ProcessOverflowedObjects(); |
| 489 |
| 490 // Mark objects reachable from roots. This method is thread safe. |
| 491 // All roots are separated into several groups. Different markers |
| 492 // compete for these groups. Markers that do not have work try |
| 493 // stealing it from those markers that have it. |
| 494 // |
| 495 // When this method returns, the following holds: |
| 496 // - all objects reachable from strong roots are marked; |
| 497 // - there are no overflowed objects in the heap; |
| 498 // - all markers have empty deques. |
| 499 void MarkRoots(); |
| 500 |
| 501 // Initiate marking of objects reachable from roots. |
| 502 // Wake up all marker threads and commence marking of objects |
| 503 // reachable from strong roots. |
| 504 // When this method returns, the same invariants hold as for the |
| 505 // MarkRoots() method. |
| 506 static void MarkRootsPar(); |
| 507 |
| 508 #ifdef DEBUG |
| 509 // Atomically increment the count of objects marked in the heap. |
| 510 // There is no synchronization in the SetMark() method, so this count |
| 511 // can be greater than the actual number of live objects. |
| 512 static void IncrementMarkedCount(); |
| 513 #endif |
| 514 |
| 515 // Select a random victim marker and, if its deque is not empty, |
| 516 // try stealing an object from it. |
| 517 // If stealing succeeds, return the stolen object; otherwise return NULL. |
| 518 HeapObject* TryStealingObject(); |
| 519 |
| 520 // Returns true if there are markers with potentially non-empty |
| 521 // marking stacks. |
| 522 INLINE(bool SomebodyHasWork()) { return working_markers_ != 0; } |
| 523 |
| 524 // Raise the flag indicating that this marker potentially has a non-empty |
| 525 // deque. |
| 526 INLINE(void SignalHasWork()) { |
| 527 if ((working_markers_ & (1u << id_)) == 0) { |
| 528 AtomicOr(&working_markers_, 1u << id_); |
| 529 } |
| 530 } |
| 531 |
| 532 // Clear the flag indicating that this marker potentially has a non-empty |
| 533 // deque. |
| 534 INLINE(void SignalNoWork()) { |
| 535 if ((working_markers_ & (1u << id_)) != 0) { |
| 536 AtomicAnd(&working_markers_, ~(1u << id_)); |
| 537 } |
| 538 } |
| 539 |
| 540 // Returns true if the heap potentially contains overflowed objects. |
| 541 INLINE(bool Overflowed()) { return overflowed_state_ != 0; } |
| 542 |
| 543 // Mark all spaces as potentially containing overflowed objects. |
| 544 INLINE(void SetOverflowedState()) { |
| 545 static const Atomic32 OVERFLOWED = (1 << (LAST_SPACE + 1)) - 1; |
| 546 if (overflowed_state_ != OVERFLOWED) { |
| 547 AtomicOr(&overflowed_state_, OVERFLOWED); |
| 548 } |
| 549 } |
| 550 |
| 551 // Body of the thread owned by each auxiliary marker. |
| 552 // Auxiliary markers sleep and wait for a wakeup signal to be raised |
| 553 // by the main thread. After awakening they proceed to mark objects |
| 554 // (see the MarkRoots method); when all objects are marked they go back |
| 555 // to sleep until the next GC. |
| 556 virtual void Run(); |
| 557 |
| 558 // Prepare markers for marking. Use the memory between low and high as a |
| 559 // backing store for the markers' deques. |
| 560 static void Initialize(Address low, Address high); |
| 561 |
| 562 // Return the main marker. |
| 563 static Marker* Sequential() { return markers_[0]; } |
| 564 |
| 565 // Return RootMarkingVisitor associated with this marker. |
| 566 inline ObjectVisitor* root_visitor() { return &root_visitor_; } |
| 567 |
| 568 // Mark objects reachable from object groups. |
| 569 // Parallel marking of object groups is not supported. |
| 570 // This method can only be called from main thread. |
| 571 // Returns true if there were any objects marked. |
| 572 bool ProcessObjectGroups(); |
| 573 |
| 574 // Auxiliary method which can be used to evenly seed deques of sleeping |
| 575 // markers. |
| 576 static void ClaimObjectByOneOfMarkers(HeapObject* object) { |
| 577 ASSERT(working_markers_ == 0); |
| 578 markers_[current_marker_]->ClaimObject(object); |
| 579 current_marker_ = (current_marker_ + 1) % |
| 580 MarkCompactCollector::NumberOfMarkers(); |
| 581 } |
| 582 |
| 583 private: |
| 584 // Use the memory region between low and high as a backing store for the |
| 585 // deque owned by this marker. |
| 586 void InitializeDeque(Address low, Address high); |
| 587 |
| 588 // Use the iterator it to scan for overflowed objects in the heap. |
| 589 // Try pushing found objects onto the deque and clear the overflow mark |
| 590 // if the push succeeded. |
| 591 template<class T> void ScanOverflowedObjects(T* it); |
| 592 |
| 593 // Scan the heap for overflowed objects and push found objects |
| 594 // onto the deque. |
| 595 // Scanning of different spaces can be performed |
| 596 // by different markers in parallel. |
| 597 // Might leave some overflowed objects in the heap. |
| 598 void ScanOverflowedObjects(); |
| 599 |
| 600 enum AllocationSpaceScanningState { AVAILABLE, UNDER_SCAN }; |
| 601 |
| 602 // Try claiming an allocation space for scanning by this marker. |
| 603 // On success, clear the flag indicating that this space may contain |
| 604 // overflowed objects. |
| 605 // Return true if the space was claimed successfully. |
| 606 inline bool BeginScanOverflowedIn(AllocationSpace space) { |
| 607 if (((overflowed_state_ & (1 << space)) != 0) && |
| 608 AtomicCompareAndSwap(&spaces_state_[space], AVAILABLE, UNDER_SCAN)) { |
| 609 AtomicAnd(&overflowed_state_, ~(1u << space)); |
| 610 return true; |
| 611 } |
| 612 return false; |
| 613 } |
| 614 |
| 615 // Release the allocation space this marker was scanning. |
| 616 // If the deque of this marker is full then some overflowed objects might |
| 617 // remain unscanned in the space. In this case raise the flag |
| 618 // indicating this and return false. |
| 619 // Otherwise return true. |
| 620 inline bool EndScanOverflowedIn(AllocationSpace space) { |
| 621 bool overflowed = q_.IsFull(); |
| 622 if (overflowed) AtomicOr(&overflowed_state_, 1u << space); |
| 623 spaces_state_[space] = AVAILABLE; |
| 624 return !overflowed; |
| 625 } |
| 626 |
| 627 // Mark the symbol table and all objects reachable from its prefix. |
| 628 void MarkSymbolTable(); |
| 629 |
| 630 // Find all unprocessed object groups containing at least one marked object. |
| 631 // Mark unmarked objects belonging to these groups and push them onto the deque. |
| 632 bool MarkObjectGroups(); |
| 633 |
| 634 // Raise the flag indicating that this marker has left the work-stealing loop. |
| 635 // It is safe to continue the marking phase (e.g. processing of object groups) |
| 636 // only after all auxiliary markers have left it. |
| 637 INLINE(void SignalSleeping()) { |
| 638 if ((sleeping_markers_ & (1u << id_)) == 0) { |
| 639 AtomicOr(&sleeping_markers_, 1u << id_); |
| 640 } |
| 641 } |
| 642 |
| 643 |
| 644 uint32_t id_; |
| 645 |
| 646 VictimSelector victim_; |
| 647 |
| 648 RootMarkingVisitor root_visitor_; |
| 649 |
| 650 // Semaphore which is used to notify auxiliary markers about the start of |
| 651 // the marking phase. |
| 652 Semaphore* wakeup_signal_; |
| 653 |
| 654 // Marking deque. Contains grey objects that should be scanned by the marker. |
| 655 Deque q_; |
| 656 |
| 657 // Id of the marker which does not own a thread and performs marking |
| 658 // from the main thread. |
| 659 static const uint32_t kMainThreadMarkerId = 0; |
| 660 |
| 661 static const int kMaxNumberOfMarkers = 32; |
| 662 |
| 663 // Id of the marker that will receive the next object during balanced |
| 664 // seeding of the deques. See the ClaimObjectByOneOfMarkers method. |
| 665 static uint32_t current_marker_; |
| 666 |
| 667 // Bitvector containing the per-space overflow state. |
| 668 // An allocation space can contain overflowed objects if and only if the bit |
| 669 // corresponding to the space is set. The mapping from allocation spaces to |
| 670 // bits is defined by the AllocationSpace enumeration. |
| 671 static volatile Atomic32 overflowed_state_; |
| 672 |
| 673 // When markers scan the heap for overflowed objects they compete to acquire |
| 674 // exclusive scanning access to each space. This array is used |
| 675 // for synchronization. See the BeginScanOverflowedIn/EndScanOverflowedIn |
| 676 // methods for more details. |
| 677 static Atomic32 spaces_state_[LAST_SPACE + 1]; |
| 678 |
| 679 // Bitvector containing information about working markers. |
| 680 // If the i'th bit is set then the i'th marker possibly has a non-empty |
| 681 // deque. |
| 682 static volatile Atomic32 working_markers_; |
| 683 |
| 684 // Bitvector containing information about sleeping markers. |
| 685 // Used to implement a barrier inside the MarkRootsPar() method. |
| 686 // If the i'th bit is set then the i'th marker has left the work-stealing loop. |
| 687 static volatile Atomic32 sleeping_markers_; |
| 688 |
| 689 static Marker* markers_[kMaxNumberOfMarkers]; |
| 690 }; |
| 691 |
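Putting the public interface together, a full-GC marking cycle would presumably drive the markers roughly as follows (the call sequence is pieced together from the declarations above as an assumption; the actual call sites are outside this section):

    Marker::Initialize(low, high);  // split [low, high) into per-marker deques,
                                    // reset root groups and space scan states
    Marker::MarkRootsPar();         // wake auxiliary markers; every marker runs
                                    // MarkRoots() and work-steals until done
    while (Marker::Sequential()->ProcessObjectGroups()) {
      // Object groups are processed single-threaded on the main marker.
    }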
| 692 |
| 693 #ifdef DEBUG |
| 694 void Marker::IncrementMarkedCount() { |
| 695 MarkCompactCollector::tracer()->increment_marked_count(); |
| 696 } |
| 697 #endif |
| 698 |
| 699 |
| 700 void Marker::MarkRootsPar() { |
| 701 // Signal that all markers have work. |
| 702 const Atomic32 all_markers_mask = |
| 703 (1 << MarkCompactCollector::NumberOfMarkers()) - 1; |
| 704 |
| 705 sleeping_markers_ = 0; |
| 706 working_markers_ = all_markers_mask; |
| 707 |
| 708 // Wake up auxiliary markers. |
| 709 for (int i = 1; i < MarkCompactCollector::NumberOfMarkers(); i++) { |
| 710 markers_[i]->wakeup_signal_->Signal(); |
| 711 } |
| 712 |
| 713 Thread::YieldCPU(); |
| 714 |
| 715 // Commence marking. |
| 716 markers_[0]->MarkRoots(); |
| 717 |
| 718 // When MarkRoots returns there should be no working markers. |
| 719 ASSERT(working_markers_ == 0); |
| 720 |
| 721 // Wait until all markers actually leave their work stealing loops. |
| 722 while (sleeping_markers_ != all_markers_mask) Thread::YieldCPU(); |
| 723 } |
| 724 |
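For example, with three markers the two bitvectors move through one cycle of this fork/join barrier roughly like so:

    MarkRootsPar entry:      working_markers_ == 0b111, sleeping_markers_ == 0b000
    marker i runs dry:       SignalNoWork() clears bit i of working_markers_
    marker i exits the loop: SignalSleeping() sets bit i of sleeping_markers_
    MarkRootsPar returns once working_markers_ == 0b000
                          and sleeping_markers_ == 0b111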
| 725 |
| 726 void Marker::Run() { |
| 727 // Main marker does not own a thread. |
| 728 ASSERT(id_ != kMainThreadMarkerId); |
| 729 |
| 730 while (true) { |
| 731 // Wait for wakeup call from main marker. |
| 732 wakeup_signal_->Wait(); |
| 733 |
| 734 // Commence marking. |
| 735 MarkRoots(); |
| 736 |
| 737 // When MarkRoots returns there should be no working markers. |
| 738 ASSERT(working_markers_ == 0); |
| 739 } |
| 740 } |
| 741 |
| 742 |
| 743 void Marker::TransitiveClosure() { |
| 744 while (true) { |
| 745 // Try to get object from local deque. |
| 746 HeapObject* obj = q_.PopBottom(); |
| 747 if (obj == NULL) { |
| 748 ASSERT(q_.IsEmpty()); |
| 749 return; |
| 750 } |
| 751 |
| 752 // Scan object body. |
| 753 ProcessObject(obj); |
| 754 } |
| 755 } |
| 756 |
| 757 |
| 758 static int OverflowObjectSize(HeapObject* obj) { |
| 759 // Recover the normal map pointer, it might be marked as live and |
| 760 // overflowed. |
| 761 MapWord map_word = obj->map_word(); |
| 762 map_word.ClearMark(); |
| 763 map_word.ClearOverflow(); |
| 764 return obj->SizeFromMap(map_word.ToMap()); |
| 765 } |
| 766 |
| 767 |
| 768 // Fill the deque with overflowed objects returned by the given |
| 769 // iterator. Stop when the deque is full or the end of the space |
| 770 // is reached, whichever comes first. |
| 771 template<class T> |
| 772 void Marker::ScanOverflowedObjects(T* it) { |
| 773 // The caller should ensure that the deque is initially not full, |
| 774 // so that we don't waste effort pointlessly scanning for objects. |
| 775 ASSERT(!q_.IsFull()); |
| 776 |
| 777 for (HeapObject* object = it->next(); object != NULL; object = it->next()) { |
| 778 if (object->IsOverflowed()) { |
| 779 object->ClearOverflow(); |
| 780 ASSERT(object->IsMarked()); |
| 781 ASSERT(Heap::Contains(object)); |
| 782 q_.PushBottom(object); |
| 783 if (q_.IsFull()) return; |
| 784 } |
| 785 } |
| 786 } |
| 787 |
| 788 |
| 789 void Marker::ScanOverflowedObjects() { |
| 790 if (BeginScanOverflowedIn(NEW_SPACE)) { |
| 791 SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize); |
| 792 ScanOverflowedObjects(&new_it); |
| 793 if (!EndScanOverflowedIn(NEW_SPACE)) return; |
| 794 } |
| 795 |
| 796 if (BeginScanOverflowedIn(OLD_POINTER_SPACE)) { |
| 797 HeapObjectIterator old_pointer_it(Heap::old_pointer_space(), |
| 798 &OverflowObjectSize); |
| 799 ScanOverflowedObjects(&old_pointer_it); |
| 800 if (!EndScanOverflowedIn(OLD_POINTER_SPACE)) return; |
| 801 } |
| 802 |
| 803 if (BeginScanOverflowedIn(OLD_DATA_SPACE)) { |
| 804 HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize); |
| 805 ScanOverflowedObjects(&old_data_it); |
| 806 if (!EndScanOverflowedIn(OLD_DATA_SPACE)) return; |
| 807 } |
| 808 |
| 809 if (BeginScanOverflowedIn(CODE_SPACE)) { |
| 810 HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize); |
| 811 ScanOverflowedObjects(&code_it); |
| 812 if (!EndScanOverflowedIn(CODE_SPACE)) return; |
| 813 } |
| 814 |
| 815 if (BeginScanOverflowedIn(MAP_SPACE)) { |
| 816 HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize); |
| 817 ScanOverflowedObjects(&map_it); |
| 818 if (!EndScanOverflowedIn(MAP_SPACE)) return; |
| 819 } |
| 820 |
| 821 if (BeginScanOverflowedIn(CELL_SPACE)) { |
| 822 HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize); |
| 823 ScanOverflowedObjects(&cell_it); |
| 824 if (!EndScanOverflowedIn(CELL_SPACE)) return; |
| 825 } |
| 826 |
| 827 if (BeginScanOverflowedIn(LO_SPACE)) { |
| 828 LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize); |
| 829 ScanOverflowedObjects(&lo_it); |
| 830 if (!EndScanOverflowedIn(LO_SPACE)) return; |
| 831 } |
| 832 } |
| 833 |
| 834 |
| 835 void Marker::ProcessOverflowedObjects() { |
| 836 ASSERT(q_.IsEmpty()); |
| 837 |
| 838 if (Overflowed()) { |
| 839 // Signal that we might soon have objects in our deque. |
| 840 SignalHasWork(); |
| 841 |
| 842 do { |
| 843 // Fill the deque with overflowed objects. |
| 844 ScanOverflowedObjects(); |
| 845 |
| 846 // Empty the deque. |
| 847 TransitiveClosure(); |
| 848 } while (Overflowed()); |
| 849 |
| 850 // Signal that we do not have objects in our deque. |
| 851 ASSERT(q_.IsEmpty()); |
| 852 SignalNoWork(); |
| 853 } |
| 854 } |
| 855 |
| 856 |
| 857 void Marker::MarkRoots() { |
| 858 // Compete with other markers for root groups. |
| 859 for (int partition = kStrongRootsList; |
| 860 partition < kNumberOfRootsPartitions; |
| 861 partition++) { |
| 862 Heap::ClaimAndIterateStrongRootsGroup( |
| 863 root_visitor(), |
| 864 VISIT_ONLY_STRONG, |
| 865 static_cast<RootsPartition>(partition)); |
| 866 } |
| 867 |
| 868 // Mark SymbolTable and its prefix. |
| 869 MarkSymbolTable(); |
| 870 |
| 871 // Empty the deque. |
| 872 TransitiveClosure(); |
| 873 |
| 874 // Signal that we have an empty deque. |
| 875 ASSERT(q_.IsEmpty()); |
| 876 SignalNoWork(); |
| 877 |
| 878 // While some marker potentially has a non-empty deque or |
| 879 // there are potentially overflowed objects in the heap, keep working. |
| 880 while (SomebodyHasWork() || Overflowed()) { |
| 881 // If there are overflowed objects in the heap, help scan them. |
| 882 ProcessOverflowedObjects(); |
| 883 |
| 884 // If there is no work in the local deque, steal from another thread. |
| 885 HeapObject* obj = TryStealingObject(); |
| 886 |
| 887 if (obj != NULL) { |
| 888 // Scan object body and transitively mark objects reachable from it. |
| 889 ProcessObject(obj); |
| 890 TransitiveClosure(); |
| 891 ASSERT(q_.IsEmpty()); |
| 892 } else { |
| 893 // We failed to steal work. Our deque is empty. |
| 894 ASSERT(q_.IsEmpty()); |
| 895 SignalNoWork(); |
| 896 } |
| 897 } |
| 898 |
| 899 // Check invariants that must hold on the exit from MarkRoots(). |
| 900 ASSERT(q_.IsEmpty()); |
| 901 ASSERT(!Overflowed()); |
| 902 ASSERT(!SomebodyHasWork()); |
| 903 |
| 904 SignalSleeping(); |
| 905 } |
| 906 |
| 907 |
| 908 HeapObject* Marker::TryStealingObject() { |
| 909 YieldCPU(); // Give other threads a chance to do something. |
| 910 |
| 911 int max_attempts = MarkCompactCollector::NumberOfMarkers() / 2; |
| 912 for (int attempt = 0; |
| 913 (attempt < max_attempts) && SomebodyHasWork(); |
| 914 attempt++) { |
| 915 Marker* marker = markers_[victim_.Next()]; |
| 916 if ((marker != this) && !marker->q_.IsEmpty()) { |
| 917 SignalHasWork(); // We will have work if stealing succeeds. |
| 918 return marker->q_.PopTop(); |
| 919 } |
| 920 } |
| 921 |
| 922 return NULL; // All attempts to steal work failed. |
| 923 } |
| 924 |
| 925 |
| 926 void Marker::InitializeDeque(Address low, Address high) { |
| 927 q_.Initialize(low, high); |
| 928 } |
| 929 |
| 930 |
| 931 void Marker::Initialize(Address low, Address high) { |
| 932 if (markers_[0] == NULL) { // Markers were not created. |
| 933 markers_[0] = new Marker(0); // Create main marker. |
| 934 |
| 935 // Create auxiliary markers and start threads owned by them. |
| 936 for (int i = 1; i < MarkCompactCollector::NumberOfMarkers(); i++) { |
| 937 markers_[i] = new Marker(i); |
| 938 markers_[i]->Start(); |
| 939 } |
| 940 } |
| 941 |
| 942 // Reset state of root groups to UNCLAIMED. |
| 943 RootsPartitioner::Reset(); |
| 944 |
| 945 // Divide the memory region between low and high evenly among the |
| 946 // markers. |
| 947 Address base = low; |
| 948 intptr_t step = (high - low) / MarkCompactCollector::NumberOfMarkers(); |
| 949 for (int i = 0; i < MarkCompactCollector::NumberOfMarkers(); i++) { |
| 950 markers_[i]->InitializeDeque(base, base + step); |
| 951 base += step; |
| 952 } |
| 953 |
| 954 // Initialize scanning state of spaces. |
| 955 for (int space = FIRST_SPACE; space <= LAST_SPACE; space++) { |
| 956 spaces_state_[space] = AVAILABLE; |
| 957 } |
| 958 } |
| 959 |
| 960 |
| 961 volatile Atomic32 Marker::overflowed_state_ = 0; |
| 962 |
| 963 volatile Atomic32 Marker::working_markers_ = 0; |
| 964 |
| 965 volatile Atomic32 Marker::sleeping_markers_ = 0; |
| 966 |
| 967 Marker* Marker::markers_[Marker::kMaxNumberOfMarkers]; |
| 968 |
| 969 Atomic32 Marker::spaces_state_[LAST_SPACE + 1]; |
217 | 970 |
218 | 971 |
219 static inline HeapObject* ShortCircuitConsString(Object** p) { | 972 static inline HeapObject* ShortCircuitConsString(Object** p) { |
220 // Optimization: If the heap object pointed to by p is a non-symbol | 973 // Optimization: If the heap object pointed to by p is a non-symbol |
221 // cons string whose right substring is Heap::empty_string, update | 974 // cons string whose right substring is Heap::empty_string, update |
222 // it in place to its left substring. Return the updated value. | 975 // it in place to its left substring. Return the updated value. |
223 // | 976 // |
224 // Here we assume that if we change *p, we replace it with a heap object | 977 // Here we assume that if we change *p, we replace it with a heap object |
225 // (ie, the left substring of a cons string is always a heap object). | 978 // (ie, the left substring of a cons string is always a heap object). |
226 // | 979 // |
(...skipping 19 matching lines...) Expand all Loading... |
246 Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first(); | 999 Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first(); |
247 if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object; | 1000 if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object; |
248 | 1001 |
249 *p = first; | 1002 *p = first; |
250 return HeapObject::cast(first); | 1003 return HeapObject::cast(first); |
251 } | 1004 } |
252 | 1005 |
253 | 1006 |
254 class StaticMarkingVisitor : public StaticVisitorBase { | 1007 class StaticMarkingVisitor : public StaticVisitorBase { |
255 public: | 1008 public: |
256 static inline void IterateBody(Map* map, HeapObject* obj) { | 1009 template<typename BodyDescriptor> |
257 table_.GetVisitor(map)(map, obj); | 1010 class Flexible { |
| 1011 public: |
| 1012 static inline void Visit(Map* map, |
| 1013 HeapObject* object, |
| 1014 Marker* marker) { |
| 1015 int object_size = BodyDescriptor::SizeOf(map, object); |
| 1016 StaticMarkingVisitor::IteratePointers( |
| 1017 object, BodyDescriptor::kStartOffset, object_size, marker); |
| 1018 } |
| 1019 |
| 1020 template<int object_size> |
| 1021 static inline void VisitSpecialized(Map* map, |
| 1022 HeapObject* object, |
| 1023 Marker* marker) { |
| 1024 ASSERT(BodyDescriptor::SizeOf(map, object) == object_size); |
| 1025 StaticMarkingVisitor::IteratePointers( |
| 1026 object, BodyDescriptor::kStartOffset, object_size, marker); |
| 1027 } |
| 1028 }; |
| 1029 |
| 1030 template<typename BodyDescriptor> |
| 1031 class Fixed { |
| 1032 public: |
| 1033 static inline void Visit(Map* map, |
| 1034 HeapObject* object, |
| 1035 Marker* marker) { |
| 1036 StaticMarkingVisitor::IteratePointers(object, |
| 1037 BodyDescriptor::kStartOffset, |
| 1038 BodyDescriptor::kEndOffset, |
| 1039 marker); |
| 1040 } |
| 1041 }; |
| 1042 |
| 1043 |
| 1044 static inline void IterateBody(Map* map, |
| 1045 HeapObject* obj, |
| 1046 Marker* marker) { |
| 1047 ASSERT(map->IsMarked()); |
| 1048 table_.GetVisitor(map)(map, obj, marker); |
258 } | 1049 } |
259 | 1050 |
260 static void EnableCodeFlushing(bool enabled) { | 1051 static void EnableCodeFlushing(bool enabled) { |
261 if (enabled) { | 1052 if (enabled) { |
262 table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode); | 1053 table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode); |
263 } else { | 1054 } else { |
264 table_.Register(kVisitJSFunction, &VisitJSFunction); | 1055 table_.Register(kVisitJSFunction, &VisitJSFunction); |
265 } | 1056 } |
266 } | 1057 } |
267 | 1058 |
| 1059 static void SelectMapVisitor() { |
| 1060 if (FLAG_cleanup_caches_in_maps_at_gc && FLAG_collect_maps) { |
| 1061 table_.Register(kVisitMap, &VisitMap<true, true>); |
| 1062 } else if (FLAG_cleanup_caches_in_maps_at_gc && !FLAG_collect_maps) { |
| 1063 table_.Register(kVisitMap, &VisitMap<true, false>); |
| 1064 } else if (!FLAG_cleanup_caches_in_maps_at_gc && FLAG_collect_maps) { |
| 1065 table_.Register(kVisitMap, &VisitMap<false, true>); |
| 1066 } else if (!FLAG_cleanup_caches_in_maps_at_gc && !FLAG_collect_maps) { |
| 1067 table_.Register(kVisitMap, &VisitMap<false, false>); |
| 1068 } else { |
| 1069 UNREACHABLE(); |
| 1070 } |
| 1071 } |
| 1072 |
268 static void Initialize() { | 1073 static void Initialize() { |
269 table_.Register(kVisitShortcutCandidate, | 1074 table_.Register(kVisitShortcutCandidate, |
270 &FixedBodyVisitor<StaticMarkingVisitor, | 1075 &Fixed<ConsString::BodyDescriptor>::Visit); |
271 ConsString::BodyDescriptor, | |
272 void>::Visit); | |
273 | 1076 |
274 table_.Register(kVisitConsString, | 1077 table_.Register(kVisitConsString, |
275 &FixedBodyVisitor<StaticMarkingVisitor, | 1078 &Fixed<ConsString::BodyDescriptor>::Visit); |
276 ConsString::BodyDescriptor, | |
277 void>::Visit); | |
278 | 1079 |
279 | 1080 |
280 table_.Register(kVisitFixedArray, | 1081 table_.Register(kVisitFixedArray, |
281 &FlexibleBodyVisitor<StaticMarkingVisitor, | 1082 &Flexible<FixedArray::BodyDescriptor>::Visit); |
282 FixedArray::BodyDescriptor, | |
283 void>::Visit); | |
284 | 1083 |
285 table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo); | 1084 table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo); |
286 | 1085 |
287 table_.Register(kVisitByteArray, &DataObjectVisitor::Visit); | 1086 table_.Register(kVisitByteArray, &DataObjectVisitor::Visit); |
288 table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit); | 1087 table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit); |
289 table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit); | 1088 table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit); |
290 | 1089 |
291 table_.Register(kVisitOddball, | 1090 table_.Register(kVisitOddball, |
292 &FixedBodyVisitor<StaticMarkingVisitor, | 1091 &Fixed<Oddball::BodyDescriptor>::Visit); |
293 Oddball::BodyDescriptor, | 1092 |
294 void>::Visit); | 1093 SelectMapVisitor(); |
295 table_.Register(kVisitMap, | |
296 &FixedBodyVisitor<StaticMarkingVisitor, | |
297 Map::BodyDescriptor, | |
298 void>::Visit); | |
299 | 1094 |
300 table_.Register(kVisitCode, &VisitCode); | 1095 table_.Register(kVisitCode, &VisitCode); |
301 | 1096 |
302 table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode); | 1097 table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode); |
303 | 1098 |
304 table_.Register(kVisitPropertyCell, | 1099 table_.Register(kVisitPropertyCell, |
305 &FixedBodyVisitor<StaticMarkingVisitor, | 1100 &Fixed<JSGlobalPropertyCell::BodyDescriptor>::Visit); |
306 JSGlobalPropertyCell::BodyDescriptor, | |
307 void>::Visit); | |
308 | 1101 |
309 table_.RegisterSpecializations<DataObjectVisitor, | 1102 table_.RegisterSpecializations<DataObjectVisitor, |
310 kVisitDataObject, | 1103 kVisitDataObject, |
311 kVisitDataObjectGeneric>(); | 1104 kVisitDataObjectGeneric>(); |
312 | 1105 |
313 table_.RegisterSpecializations<JSObjectVisitor, | 1106 table_.RegisterSpecializations<JSObjectVisitor, |
314 kVisitJSObject, | 1107 kVisitJSObject, |
315 kVisitJSObjectGeneric>(); | 1108 kVisitJSObjectGeneric>(); |
316 | 1109 |
317 table_.RegisterSpecializations<StructObjectVisitor, | 1110 table_.RegisterSpecializations<StructObjectVisitor, |
318 kVisitStruct, | 1111 kVisitStruct, |
319 kVisitStructGeneric>(); | 1112 kVisitStructGeneric>(); |
320 } | 1113 } |
321 | 1114 |
322 INLINE(static void VisitPointer(Object** p)) { | 1115 INLINE(static void VisitPointer(Object** p, Marker* marker)) { |
323 MarkObjectByPointer(p); | 1116 MarkObjectByPointer(p, marker); |
324 } | 1117 } |
325 | 1118 |
326 INLINE(static void VisitPointers(Object** start, Object** end)) { | 1119 INLINE(static void VisitPointers(Object** start, |
| 1120 Object** end, |
| 1121 Marker* marker)) { |
327 // Mark all objects pointed to in [start, end). | 1122 // Mark all objects pointed to in [start, end). |
328 const int kMinRangeForMarkingRecursion = 64; | 1123 const int kMinRangeForMarkingRecursion = 64; |
329 if (end - start >= kMinRangeForMarkingRecursion) { | 1124 if (end - start >= kMinRangeForMarkingRecursion) { |
330 if (VisitUnmarkedObjects(start, end)) return; | 1125 if (VisitUnmarkedObjects(start, end, marker)) return; |
331 // We are close to a stack overflow, so just mark the objects. | 1126 // We are close to a stack overflow, so just mark the objects. |
332 } | 1127 } |
333 for (Object** p = start; p < end; p++) MarkObjectByPointer(p); | 1128 for (Object** p = start; p < end; p++) MarkObjectByPointer(p, marker); |
334 } | 1129 } |
335 | 1130 |
336 static inline void VisitCodeTarget(RelocInfo* rinfo) { | 1131 static inline void VisitCodeTarget(RelocInfo* rinfo, Marker* marker) { |
337 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); | 1132 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); |
338 Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address()); | 1133 Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address()); |
339 if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) { | 1134 if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) { |
340 IC::Clear(rinfo->pc()); | 1135 IC::Clear(rinfo->pc()); |
341 // Please note targets for cleared inline caches do not have to be | 1136 // Please note targets for cleared inline caches do not have to be |
342 // marked since they are contained in Heap::non_monomorphic_cache(). | 1137 // marked since they are contained in Heap::non_monomorphic_cache(). |
343 } else { | 1138 } else { |
344 MarkCompactCollector::MarkObject(code); | 1139 marker->ClaimObject(code); |
345 } | 1140 } |
346 } | 1141 } |
347 | 1142 |
348 static inline void VisitDebugTarget(RelocInfo* rinfo) { | 1143 static inline void VisitDebugTarget(RelocInfo* rinfo, Marker* marker) { |
349 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && | 1144 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && |
350 rinfo->IsPatchedReturnSequence()) || | 1145 rinfo->IsPatchedReturnSequence()) || |
351 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && | 1146 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && |
352 rinfo->IsPatchedDebugBreakSlotSequence())); | 1147 rinfo->IsPatchedDebugBreakSlotSequence())); |
353 HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address()); | 1148 HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address()); |
354 MarkCompactCollector::MarkObject(code); | 1149 marker->ClaimObject(code); |
355 } | 1150 } |
356 | 1151 |
357 // Mark object pointed to by p. | 1152 // Mark object pointed to by p. |
358 INLINE(static void MarkObjectByPointer(Object** p)) { | 1153 INLINE(static void MarkObjectByPointer(Object** p, Marker* marker)) { |
359 if (!(*p)->IsHeapObject()) return; | 1154 if (!(*p)->IsHeapObject()) return; |
360 HeapObject* object = ShortCircuitConsString(p); | 1155 HeapObject* object = ShortCircuitConsString(p); |
361 MarkCompactCollector::MarkObject(object); | 1156 marker->ClaimObject(object); |
362 } | 1157 } |
363 | 1158 |
364 // Visit an unmarked object. | 1159 // Visit an unmarked object. |
365 static inline void VisitUnmarkedObject(HeapObject* obj) { | 1160 static inline void VisitUnmarkedObject(HeapObject* obj, |
| 1161 Marker* marker) { |
366 #ifdef DEBUG | 1162 #ifdef DEBUG |
367 ASSERT(Heap::Contains(obj)); | 1163 ASSERT(Heap::Contains(obj)); |
368 ASSERT(!obj->IsMarked()); | |
369 #endif | 1164 #endif |
370 Map* map = obj->map(); | 1165 Map* map = obj->map(); |
371 MarkCompactCollector::SetMark(obj); | 1166 if (marker->SetMark(obj)) { |
372 // Mark the map pointer and the body. | 1167 // Mark the map pointer and the body. |
373 MarkCompactCollector::MarkObject(map); | 1168 ASSERT(map->IsHeapObject()); |
374 IterateBody(map, obj); | 1169 marker->ClaimObject(map); |
| 1170 IterateBody(map, obj, marker); |
| 1171 } |
375 } | 1172 } |
376 | 1173 |
377 // Visit all unmarked objects pointed to by [start, end). | 1174 // Visit all unmarked objects pointed to by [start, end). |
378 // Returns false if the operation fails (lack of stack space). | 1175 // Returns false if the operation fails (lack of stack space). |
379 static inline bool VisitUnmarkedObjects(Object** start, Object** end) { | 1176 static inline bool VisitUnmarkedObjects(Object** start, |
| 1177 Object** end, |
| 1178 Marker* marker) { |
380 // Return false if we are close to the stack limit. | 1179 // Return false if we are close to the stack limit. |
381 StackLimitCheck check; | 1180 StackLimitCheck check; |
382 if (check.HasOverflowed()) return false; | 1181 if (check.HasOverflowed()) return false; |
383 | 1182 |
384 // Visit the unmarked objects. | 1183 // Visit the unmarked objects. |
385 for (Object** p = start; p < end; p++) { | 1184 for (Object** p = start; p < end; p++) { |
386 if (!(*p)->IsHeapObject()) continue; | 1185 if (!(*p)->IsHeapObject()) continue; |
387 HeapObject* obj = HeapObject::cast(*p); | 1186 HeapObject* obj = HeapObject::cast(*p); |
388 if (obj->IsMarked()) continue; | 1187 if (obj->IsMarked()) continue; |
389 VisitUnmarkedObject(obj); | 1188 VisitUnmarkedObject(obj, marker); |
390 } | 1189 } |
391 return true; | 1190 return true; |
392 } | 1191 } |
393 | 1192 |
394 static inline void VisitExternalReference(Address* p) { } | 1193 static inline void VisitExternalReference(Address* p, Marker* marker) { } |
395 static inline void VisitRuntimeEntry(RelocInfo* rinfo) { } | 1194 static inline void VisitRuntimeEntry(RelocInfo* rinfo, Marker* marker) { } |
396 | 1195 |
397 private: | 1196 private: |
398 class DataObjectVisitor { | 1197 class DataObjectVisitor { |
399 public: | 1198 public: |
400 template<int size> | 1199 template<int size> |
401 static void VisitSpecialized(Map* map, HeapObject* object) { | 1200 static void VisitSpecialized(Map* map, |
| 1201 HeapObject* object, |
| 1202 Marker* marker) { |
402 } | 1203 } |
403 | 1204 |
404 static void Visit(Map* map, HeapObject* object) { | 1205 static void Visit(Map* map, HeapObject* object, Marker* marker) { |
405 } | 1206 } |
406 }; | 1207 }; |
407 | 1208 |
408 typedef FlexibleBodyVisitor<StaticMarkingVisitor, | 1209 typedef Flexible<JSObject::BodyDescriptor> JSObjectVisitor; |
409 JSObject::BodyDescriptor, | |
410 void> JSObjectVisitor; | |
411 | 1210 |
412 typedef FlexibleBodyVisitor<StaticMarkingVisitor, | 1211 typedef Flexible<StructBodyDescriptor> StructObjectVisitor; |
413 StructBodyDescriptor, | |
414 void> StructObjectVisitor; | |
415 | 1212 |
416 static void VisitCode(Map* map, HeapObject* object) { | 1213 static void VisitCode(Map* map, HeapObject* object, Marker* marker) { |
417 reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(); | 1214 reinterpret_cast<Code*>(object)-> |
| 1215 CodeIterateBody<StaticMarkingVisitor, Marker*>(marker); |
418 } | 1216 } |
419 | 1217 |
420 // Code flushing support. | 1218 // Code flushing support. |
421 | 1219 |
422 // How many collections a newly compiled code object will survive before being | 1220 // How many collections a newly compiled code object will survive before being |
423 // flushed. | 1221 // flushed. |
424 static const int kCodeAgeThreshold = 5; | 1222 static const int kCodeAgeThreshold = 5; |
425 | 1223 |
426 inline static bool HasSourceCode(SharedFunctionInfo* info) { | 1224 inline static bool HasSourceCode(SharedFunctionInfo* info) { |
427 Object* undefined = Heap::raw_unchecked_undefined_value(); | 1225 Object* undefined = Heap::raw_unchecked_undefined_value(); |
(...skipping 25 matching lines...) |
453 // If the shared function has been flushed but the function has not, | 1251 // If the shared function has been flushed but the function has not, |
454 // we flush the function if possible. | 1252 // we flush the function if possible. |
455 if (!IsCompiled(shared_info) && | 1253 if (!IsCompiled(shared_info) && |
456 IsCompiled(function) && | 1254 IsCompiled(function) && |
457 !function->unchecked_code()->IsMarked()) { | 1255 !function->unchecked_code()->IsMarked()) { |
458 function->set_code(shared_info->unchecked_code()); | 1256 function->set_code(shared_info->unchecked_code()); |
459 } | 1257 } |
460 return; | 1258 return; |
461 } | 1259 } |
462 | 1260 |
463 // Code is either on stack or in compilation cache. | 1261 // Code is either on the stack, in the compilation cache, or not flushable. |
464 if (shared_info->unchecked_code()->IsMarked()) { | 1262 if (shared_info->unchecked_code()->IsMarked()) { |
465 shared_info->set_code_age(0); | 1263 shared_info->set_code_age(0); |
466 return; | 1264 return; |
467 } | 1265 } |
468 | 1266 |
469 // The function must be compiled and have the source code available, | 1267 // The function must be compiled and have the source code available, |
470 // to be able to recompile it in case we need the function again. | 1268 // to be able to recompile it in case we need the function again. |
471 if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) return; | 1269 if (!(IsCompiled(shared_info) && HasSourceCode(shared_info))) return; |
472 | 1270 |
473 // We never flush code for Api functions. | 1271 // We never flush code for Api functions. |
474 Object* function_data = shared_info->function_data(); | 1272 Object* function_data = shared_info->function_data(); |
475 if (function_data->IsHeapObject() && | 1273 if (function_data->IsHeapObject() && |
476 (SafeMap(function_data)->instance_type() == | 1274 (SafeMap(function_data)->instance_type() == |
477 FUNCTION_TEMPLATE_INFO_TYPE)) { | 1275 FUNCTION_TEMPLATE_INFO_TYPE)) { |
478 return; | 1276 return; |
479 } | 1277 } |
480 | 1278 |
481 // Only flush code for functions. | 1279 // Only flush code for functions. |
482 if (shared_info->code()->kind() != Code::FUNCTION) return; | 1280 if (shared_info->unchecked_code()->kind() != Code::FUNCTION) return; |
483 | 1281 |
484 // Function must be lazy compilable. | 1282 // Function must be lazy compilable. |
485 if (!shared_info->allows_lazy_compilation()) return; | 1283 if (!shared_info->allows_lazy_compilation()) return; |
486 | 1284 |
488 // If this is a full script wrapped in a function we do not flush the code. | 1286 // If this is a full script wrapped in a function we do not flush the code. |
488 if (shared_info->is_toplevel()) return; | 1286 if (shared_info->is_toplevel()) return; |
489 | 1287 |
490 // Age this shared function info. | 1288 // Age this shared function info. |
491 if (shared_info->code_age() < kCodeAgeThreshold) { | 1289 if (shared_info->code_age() < kCodeAgeThreshold) { |
492 shared_info->set_code_age(shared_info->code_age() + 1); | 1290 shared_info->set_code_age(shared_info->code_age() + 1); |
(...skipping 34 matching lines...) |
527 Context* context = reinterpret_cast<Context*>(ctx); | 1325 Context* context = reinterpret_cast<Context*>(ctx); |
528 | 1326 |
529 if (IsJSBuiltinsObject(context->global())) { | 1327 if (IsJSBuiltinsObject(context->global())) { |
530 return false; | 1328 return false; |
531 } | 1329 } |
532 | 1330 |
533 return true; | 1331 return true; |
534 } | 1332 } |
535 | 1333 |
536 | 1334 |
537 static void VisitSharedFunctionInfo(Map* map, HeapObject* object) { | 1335 static void VisitSharedFunctionInfo(Map* map, |
| 1336 HeapObject* object, |
| 1337 Marker* marker) { |
538 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object); | 1338 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object); |
539 if (shared->IsInobjectSlackTrackingInProgress()) { | 1339 if (shared->IsInobjectSlackTrackingInProgress()) { |
540 shared->DetachInitialMap(); | 1340 shared->DetachInitialMap(); |
541 } | 1341 } |
542 FixedBodyVisitor<StaticMarkingVisitor, | 1342 Fixed<SharedFunctionInfo::BodyDescriptor>::Visit(map, object, marker); |
543 SharedFunctionInfo::BodyDescriptor, | |
544 void>::Visit(map, object); | |
545 } | 1343 } |
546 | 1344 |
547 | 1345 |
548 static void VisitCodeEntry(Address entry_address) { | 1346 static void VisitCodeEntry(Address entry_address, Marker* marker) { |
549 Object* code = Code::GetObjectFromEntryAddress(entry_address); | 1347 Object* code = Code::GetObjectFromEntryAddress(entry_address); |
550 Object* old_code = code; | 1348 Object* old_code = code; |
551 VisitPointer(&code); | 1349 VisitPointer(&code, marker); |
552 if (code != old_code) { | 1350 if (code != old_code) { |
553 Memory::Address_at(entry_address) = | 1351 Memory::Address_at(entry_address) = |
554 reinterpret_cast<Code*>(code)->entry(); | 1352 reinterpret_cast<Code*>(code)->entry(); |
555 } | 1353 } |
556 } | 1354 } |
557 | 1355 |
558 | 1356 |
559 static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) { | 1357 static void VisitJSFunctionAndFlushCode(Map* map, |
| 1358 HeapObject* object, |
| 1359 Marker* marker) { |
560 JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object); | 1360 JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object); |
561 // The function must have a valid context and not be a builtin. | 1361 // The function must have a valid context and not be a builtin. |
562 if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) { | 1362 if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) { |
563 FlushCodeForFunction(jsfunction); | 1363 FlushCodeForFunction(jsfunction); |
564 } | 1364 } |
565 VisitJSFunction(map, object); | 1365 VisitJSFunction(map, object, marker); |
566 } | 1366 } |
567 | 1367 |
568 | 1368 |
569 static void VisitJSFunction(Map* map, HeapObject* object) { | 1369 static void VisitJSFunction(Map* map, HeapObject* object, Marker* marker) { |
570 #define SLOT_ADDR(obj, offset) \ | 1370 #define SLOT_ADDR(obj, offset) \ |
571 reinterpret_cast<Object**>((obj)->address() + offset) | 1371 reinterpret_cast<Object**>((obj)->address() + offset) |
572 | 1372 |
573 VisitPointers(SLOT_ADDR(object, JSFunction::kPropertiesOffset), | 1373 VisitPointers(SLOT_ADDR(object, JSFunction::kPropertiesOffset), |
574 SLOT_ADDR(object, JSFunction::kCodeEntryOffset)); | 1374 SLOT_ADDR(object, JSFunction::kCodeEntryOffset), |
| 1375 marker); |
575 | 1376 |
576 VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset); | 1377 VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset, marker); |
577 | 1378 |
578 VisitPointers(SLOT_ADDR(object, | 1379 VisitPointers(SLOT_ADDR(object, |
579 JSFunction::kCodeEntryOffset + kPointerSize), | 1380 JSFunction::kCodeEntryOffset + kPointerSize), |
580 SLOT_ADDR(object, JSFunction::kSize)); | 1381 SLOT_ADDR(object, JSFunction::kSize), |
| 1382 marker); |
581 #undef SLOT_ADDR | 1383 #undef SLOT_ADDR |
582 } | 1384 } |
583 | 1385 |
| 1386 template<bool cleanup_caches_in_maps_at_gc, bool collect_maps> |
| 1387 static void VisitMap(Map* meta_map, HeapObject* object, Marker* marker) { |
| 1388 Map* map = reinterpret_cast<Map*>(object); |
584 | 1389 |
585 typedef void (*Callback)(Map* map, HeapObject* object); | 1390 if (cleanup_caches_in_maps_at_gc) map->ClearCodeCache(); |
| 1391 |
| 1392 if (collect_maps && |
| 1393 map->instance_type() >= FIRST_JS_OBJECT_TYPE && |
| 1394 map->instance_type() <= JS_FUNCTION_TYPE) { |
| 1395 MarkCompactCollector::MarkMapContents(map, marker); |
| 1396 } |
| 1397 |
| 1398 Fixed<Map::BodyDescriptor>::Visit(map, object, marker); |
| 1399 } |
| 1400 |
| 1401 INLINE(static void IteratePointers(HeapObject* object, |
| 1402 int start_offset, |
| 1403 int end_offset, |
| 1404 Marker* marker)) { |
| 1405 Object** start_slot = reinterpret_cast<Object**>(object->address() + |
| 1406 start_offset); |
| 1407 Object** end_slot = reinterpret_cast<Object**>(object->address() + |
| 1408 end_offset); |
| 1409 VisitPointers(start_slot, end_slot, marker); |
| 1410 } |
| 1411 |
| 1412 typedef void (*Callback)(Map* map, |
| 1413 HeapObject* object, |
| 1414 Marker* marker); |
586 | 1415 |
587 static VisitorDispatchTable<Callback> table_; | 1416 static VisitorDispatchTable<Callback> table_; |
588 }; | 1417 }; |
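The Callback typedef and table_ above implement type-indexed dispatch: each instance type selects a specialized body visitor, so IterateBody costs one indirect call instead of a switch. A minimal standalone sketch of the pattern (hypothetical Kind/Obj types, not V8's):

#include <cstdio>

// Hypothetical instance types standing in for V8's; one table slot per type.
enum Kind { kFixedArray, kMap, kSharedFunctionInfo, kKindCount };
struct Obj { Kind kind; };

typedef void (*Callback)(Obj* obj);
static void VisitArray(Obj*)  { std::printf("array\n"); }
static void VisitMapObj(Obj*) { std::printf("map\n"); }
static void VisitShared(Obj*) { std::printf("shared\n"); }

// One indirect call replaces a switch over instance types.
static Callback dispatch_table[kKindCount] = { VisitArray, VisitMapObj,
                                               VisitShared };

void IterateBody(Obj* obj) { dispatch_table[obj->kind](obj); }

int main() {
  Obj o = { kMap };
  IterateBody(&o);  // prints "map"
}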
589 | 1418 |
590 | 1419 |
591 VisitorDispatchTable<StaticMarkingVisitor::Callback> | 1420 VisitorDispatchTable<StaticMarkingVisitor::Callback> |
592 StaticMarkingVisitor::table_; | 1421 StaticMarkingVisitor::table_; |
593 | 1422 |
594 | 1423 |
| 1424 void Marker::ProcessObject(HeapObject* object) { |
| 1425 ASSERT(object->IsHeapObject()); |
| 1426 ASSERT(Heap::Contains(object)); |
| 1427 ASSERT(object->IsMarked()); |
| 1428 ASSERT(!object->IsOverflowed()); |
| 1429 |
| 1430 // Because the object is marked, we have to recover the original map |
| 1431 // pointer and use it to mark the object's body. |
| 1432 MapWord map_word = object->map_word(); |
| 1433 map_word.ClearMark(); |
| 1434 Map* map = map_word.ToMap(); |
| 1435 ClaimObject(map); |
| 1436 StaticMarkingVisitor::IterateBody(map, object, this); |
| 1437 } |
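ProcessObject must strip the mark bit before the map pointer is usable because the mark lives in the object's map word. A self-contained illustration of the tagging trick using bit 0 of an aligned pointer (V8's actual encoding may differ):

#include <cassert>
#include <cstdint>

const uintptr_t kMarkBit = 1;  // low bit is free on aligned pointers

uintptr_t SetMark(uintptr_t word)   { return word | kMarkBit; }
uintptr_t ClearMark(uintptr_t word) { return word & ~kMarkBit; }
bool IsMarked(uintptr_t word)       { return (word & kMarkBit) != 0; }

int main() {
  static int dummy_map;  // stands in for a Map object (suitably aligned)
  uintptr_t word = reinterpret_cast<uintptr_t>(&dummy_map);
  word = SetMark(word);
  assert(IsMarked(word));
  // Recover the original pointer before use, as ProcessObject does.
  assert(ClearMark(word) == reinterpret_cast<uintptr_t>(&dummy_map));
}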
| 1438 |
| 1439 |
595 class MarkingVisitor : public ObjectVisitor { | 1440 class MarkingVisitor : public ObjectVisitor { |
596 public: | 1441 public: |
| 1442 explicit MarkingVisitor(Marker* marker) : marker_(marker) {} |
| 1443 |
597 void VisitPointer(Object** p) { | 1444 void VisitPointer(Object** p) { |
598 StaticMarkingVisitor::VisitPointer(p); | 1445 StaticMarkingVisitor::VisitPointer(p, marker_); |
599 } | 1446 } |
600 | 1447 |
601 void VisitPointers(Object** start, Object** end) { | 1448 void VisitPointers(Object** start, Object** end) { |
602 StaticMarkingVisitor::VisitPointers(start, end); | 1449 StaticMarkingVisitor::VisitPointers(start, end, marker_); |
603 } | 1450 } |
604 | 1451 |
605 void VisitCodeTarget(RelocInfo* rinfo) { | 1452 void VisitCodeTarget(RelocInfo* rinfo) { |
606 StaticMarkingVisitor::VisitCodeTarget(rinfo); | 1453 StaticMarkingVisitor::VisitCodeTarget(rinfo, marker_); |
607 } | 1454 } |
608 | 1455 |
609 void VisitDebugTarget(RelocInfo* rinfo) { | 1456 void VisitDebugTarget(RelocInfo* rinfo) { |
610 StaticMarkingVisitor::VisitDebugTarget(rinfo); | 1457 StaticMarkingVisitor::VisitDebugTarget(rinfo, marker_); |
611 } | 1458 } |
| 1459 private: |
| 1460 Marker* marker_; |
612 }; | 1461 }; |
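MarkingVisitor is a thin adapter: generic heap walkers speak the virtual ObjectVisitor interface, while the marking logic lives in StaticMarkingVisitor's static methods plus an explicit Marker argument. A generic sketch of that adapter pattern (hypothetical MarkState/StaticVisitor names):

#include <cstdio>

struct MarkState { int visited; };  // stand-in for the Marker state object

struct StaticVisitor {
  static void VisitPointer(void** p, MarkState* m) { (void)p; m->visited++; }
};

// Virtual-interface adapter forwarding to the static implementation.
struct VisitorAdapter {
  explicit VisitorAdapter(MarkState* m) : m_(m) {}
  virtual ~VisitorAdapter() {}
  virtual void VisitPointer(void** p) { StaticVisitor::VisitPointer(p, m_); }
  MarkState* m_;
};

int main() {
  MarkState state = { 0 };
  VisitorAdapter adapter(&state);
  void* slot = 0;
  adapter.VisitPointer(&slot);
  std::printf("%d\n", state.visited);  // prints 1
}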
613 | 1462 |
614 | 1463 |
| 1464 void Marker::MarkSymbolTable() { |
| 1465 SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table(); |
| 1466 |
| 1467 // Mark the symbol table itself. |
| 1468 if (SetMarkExclusively(symbol_table)) { |
| 1469 // Explicitly mark the prefix. |
| 1470 MarkingVisitor marking_visitor(this); |
| 1471 symbol_table->IteratePrefix(&marking_visitor); |
| 1472 } |
| 1473 |
| 1474 ASSERT(symbol_table->IsMarked()); |
| 1475 } |
| 1476 |
| 1477 |
615 class CodeMarkingVisitor : public ThreadVisitor { | 1478 class CodeMarkingVisitor : public ThreadVisitor { |
616 public: | 1479 public: |
617 void VisitThread(ThreadLocalTop* top) { | 1480 void VisitThread(ThreadLocalTop* top) { |
618 for (StackFrameIterator it(top); !it.done(); it.Advance()) { | 1481 for (StackFrameIterator it(top); !it.done(); it.Advance()) { |
619 MarkCompactCollector::MarkObject(it.frame()->unchecked_code()); | 1482 Marker::ClaimObjectByOneOfMarkers(it.frame()->unchecked_code()); |
620 } | 1483 } |
621 } | 1484 } |
622 }; | 1485 }; |
623 | 1486 |
624 | 1487 |
625 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor { | 1488 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor { |
626 public: | 1489 public: |
627 void VisitPointers(Object** start, Object** end) { | 1490 void VisitPointers(Object** start, Object** end) { |
628 for (Object** p = start; p < end; p++) VisitPointer(p); | 1491 for (Object** p = start; p < end; p++) VisitPointer(p); |
629 } | 1492 } |
630 | 1493 |
631 void VisitPointer(Object** slot) { | 1494 void VisitPointer(Object** slot) { |
632 Object* obj = *slot; | 1495 Object* obj = *slot; |
633 if (obj->IsHeapObject()) { | 1496 if (obj->IsHeapObject()) { |
634 MarkCompactCollector::MarkObject(HeapObject::cast(obj)); | 1497 Marker::ClaimObjectByOneOfMarkers(HeapObject::cast(obj)); |
635 } | 1498 } |
636 } | 1499 } |
637 }; | 1500 }; |
638 | 1501 |
639 | 1502 |
640 void MarkCompactCollector::PrepareForCodeFlushing() { | 1503 void MarkCompactCollector::PrepareForCodeFlushing() { |
641 if (!FLAG_flush_code) { | 1504 if (!FLAG_flush_code) { |
642 StaticMarkingVisitor::EnableCodeFlushing(false); | 1505 StaticMarkingVisitor::EnableCodeFlushing(false); |
643 return; | 1506 return; |
644 } | 1507 } |
645 | 1508 |
646 #ifdef ENABLE_DEBUGGER_SUPPORT | 1509 #ifdef ENABLE_DEBUGGER_SUPPORT |
647 if (Debug::IsLoaded() || Debug::has_break_points()) { | 1510 if (Debug::IsLoaded() || Debug::has_break_points()) { |
648 StaticMarkingVisitor::EnableCodeFlushing(false); | 1511 StaticMarkingVisitor::EnableCodeFlushing(false); |
649 return; | 1512 return; |
650 } | 1513 } |
651 #endif | 1514 #endif |
652 StaticMarkingVisitor::EnableCodeFlushing(true); | 1515 StaticMarkingVisitor::EnableCodeFlushing(true); |
653 | 1516 |
654 // Ensure that empty descriptor array is marked. Method MarkDescriptorArray | |
655 // relies on it being marked before any other descriptor array. | |
656 MarkObject(Heap::raw_unchecked_empty_descriptor_array()); | |
657 | |
658 // Make sure we are not referencing the code from the stack. | 1517 // Make sure we are not referencing the code from the stack. |
659 for (StackFrameIterator it; !it.done(); it.Advance()) { | 1518 for (StackFrameIterator it; !it.done(); it.Advance()) { |
660 MarkObject(it.frame()->unchecked_code()); | 1519 Marker::ClaimObjectByOneOfMarkers(it.frame()->unchecked_code()); |
661 } | 1520 } |
662 | 1521 |
663 // Iterate the archived stacks in all threads to check if | 1522 // Iterate the archived stacks in all threads to check if |
664 // the code is referenced. | 1523 // the code is referenced. |
665 CodeMarkingVisitor code_marking_visitor; | 1524 CodeMarkingVisitor code_marking_visitor; |
666 ThreadManager::IterateArchivedThreads(&code_marking_visitor); | 1525 ThreadManager::IterateArchivedThreads(&code_marking_visitor); |
667 | 1526 |
668 SharedFunctionInfoMarkingVisitor visitor; | 1527 SharedFunctionInfoMarkingVisitor visitor; |
669 CompilationCache::IterateFunctions(&visitor); | 1528 CompilationCache::IterateFunctions(&visitor); |
| 1529 } |
670 | 1530 |
671 ProcessMarkingStack(); | 1531 void RootMarkingVisitor::MarkObjectByPointer(Object** p) { |
| 1532 if (!(*p)->IsHeapObject()) return; |
| 1533 |
| 1534 // Replace flat cons strings in place. |
| 1535 HeapObject* object = ShortCircuitConsString(p); |
| 1536 |
| 1537 Map* map = object->map(); |
| 1538 // Mark the object. |
| 1539 if (marker_->SetMark(object)) { |
| 1540 ASSERT(map->IsHeapObject()); |
| 1541 marker_->ClaimObject(map); |
| 1542 StaticMarkingVisitor::IterateBody(map, object, marker_); |
| 1543 marker_->TransitiveClosure(); |
| 1544 } |
| 1545 } |
| 1546 |
| 1547 uint32_t Marker::current_marker_ = 0; |
| 1548 |
| 1549 Marker::Marker(int id) |
| 1550 : id_(id), |
| 1551 victim_(id, MarkCompactCollector::NumberOfMarkers()), |
| 1552 root_visitor_(this) { |
| 1553 wakeup_signal_ = OS::CreateSemaphore(0); |
672 } | 1554 } |
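The victim_(id, MarkCompactCollector::NumberOfMarkers()) member in the Marker constructor suggests each marker keeps a per-thread victim selector for stealing work from its peers. The patch does not show that class, so the following round-robin sketch is only an assumption about its shape:

// Hypothetical: cycle through the other markers, skipping ourselves.
class VictimSelector {
 public:
  VictimSelector(int self, int num_markers)
      : self_(self), next_(self), num_(num_markers) {}
  int Next() {
    next_ = (next_ + 1) % num_;
    if (next_ == self_) next_ = (next_ + 1) % num_;
    return next_;
  }
 private:
  int self_;
  int next_;
  int num_;
};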
673 | 1555 |
674 | 1556 |
675 // Visitor class for marking heap roots. | |
676 class RootMarkingVisitor : public ObjectVisitor { | |
677 public: | |
678 void VisitPointer(Object** p) { | |
679 MarkObjectByPointer(p); | |
680 } | |
681 | |
682 void VisitPointers(Object** start, Object** end) { | |
683 for (Object** p = start; p < end; p++) MarkObjectByPointer(p); | |
684 } | |
685 | |
686 private: | |
687 void MarkObjectByPointer(Object** p) { | |
688 if (!(*p)->IsHeapObject()) return; | |
689 | |
690 // Replace flat cons strings in place. | |
691 HeapObject* object = ShortCircuitConsString(p); | |
692 if (object->IsMarked()) return; | |
693 | |
694 Map* map = object->map(); | |
695 // Mark the object. | |
696 MarkCompactCollector::SetMark(object); | |
697 | |
698 // Mark the map pointer and body, and push them on the marking stack. | |
699 MarkCompactCollector::MarkObject(map); | |
700 StaticMarkingVisitor::IterateBody(map, object); | |
701 | |
702 // Mark all the objects reachable from the map and body. May leave | |
703 // overflowed objects in the heap. | |
704 MarkCompactCollector::EmptyMarkingStack(); | |
705 } | |
706 }; | |
707 | |
708 | |
709 // Helper class for pruning the symbol table. | 1557 // Helper class for pruning the symbol table. |
710 class SymbolTableCleaner : public ObjectVisitor { | 1558 class SymbolTableCleaner : public ObjectVisitor { |
711 public: | 1559 public: |
712 SymbolTableCleaner() : pointers_removed_(0) { } | 1560 SymbolTableCleaner() : pointers_removed_(0) { } |
713 | 1561 |
714 virtual void VisitPointers(Object** start, Object** end) { | 1562 virtual void VisitPointers(Object** start, Object** end) { |
715 // Visit all HeapObject pointers in [start, end). | 1563 // Visit all HeapObject pointers in [start, end). |
716 for (Object** p = start; p < end; p++) { | 1564 for (Object** p = start; p < end; p++) { |
717 if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) { | 1565 if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) { |
718 // Check if the symbol being pruned is an external symbol. We need to | 1566 // Check if the symbol being pruned is an external symbol. We need to |
(...skipping 12 matching lines...) |
731 } | 1579 } |
732 | 1580 |
733 int PointersRemoved() { | 1581 int PointersRemoved() { |
734 return pointers_removed_; | 1582 return pointers_removed_; |
735 } | 1583 } |
736 private: | 1584 private: |
737 int pointers_removed_; | 1585 int pointers_removed_; |
738 }; | 1586 }; |
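SymbolTableCleaner counts the slots it clears so the caller can lower the table's element count afterwards (see symbol_table->ElementsRemoved(v.PointersRemoved()) in MarkLiveObjects below). A generic sketch of the same sweep-and-count pattern over a table's slots (hypothetical Entry type; V8 actually writes a hole/null sentinel into dead slots):

#include <vector>

struct Entry { bool live; };

// Clear dead slots and report how many were removed.
int SweepSlots(std::vector<Entry*>& slots) {
  int removed = 0;
  for (size_t i = 0; i < slots.size(); i++) {
    Entry* e = slots[i];
    if (e != 0 && !e->live) {
      slots[i] = 0;  // stand-in for writing the table's hole sentinel
      removed++;
    }
  }
  return removed;
}

int main() {
  Entry dead = { false };
  Entry alive = { true };
  std::vector<Entry*> slots;
  slots.push_back(&dead);
  slots.push_back(&alive);
  return SweepSlots(slots) == 1 ? 0 : 1;  // exits 0
}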
739 | 1587 |
740 | 1588 |
741 void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) { | 1589 void MarkCompactCollector::MarkMapContents(Map* map, Marker* marker) { |
742 ASSERT(!object->IsMarked()); | |
743 ASSERT(Heap::Contains(object)); | |
744 if (object->IsMap()) { | |
745 Map* map = Map::cast(object); | |
746 if (FLAG_cleanup_caches_in_maps_at_gc) { | |
747 map->ClearCodeCache(); | |
748 } | |
749 SetMark(map); | |
750 if (FLAG_collect_maps && | |
751 map->instance_type() >= FIRST_JS_OBJECT_TYPE && | |
752 map->instance_type() <= JS_FUNCTION_TYPE) { | |
753 MarkMapContents(map); | |
754 } else { | |
755 marking_stack.Push(map); | |
756 } | |
757 } else { | |
758 SetMark(object); | |
759 marking_stack.Push(object); | |
760 } | |
761 } | |
762 | |
763 | |
764 void MarkCompactCollector::MarkMapContents(Map* map) { | |
765 MarkDescriptorArray(reinterpret_cast<DescriptorArray*>( | 1590 MarkDescriptorArray(reinterpret_cast<DescriptorArray*>( |
766 *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset))); | 1591 *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)), marker); |
767 | 1592 |
768 // Mark the Object* fields of the Map. | 1593 // Mark the Object* fields of the Map. |
769 // Since the descriptor array has been marked already, it is fine | 1594 // Since the descriptor array has been marked already, it is fine |
770 // that one of these fields contains a pointer to it. | 1595 // that one of these fields contains a pointer to it. |
771 Object** start_slot = HeapObject::RawField(map, | 1596 Object** start_slot = HeapObject::RawField(map, |
772 Map::kPointerFieldsBeginOffset); | 1597 Map::kPointerFieldsBeginOffset); |
773 | 1598 |
774 Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset); | 1599 Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset); |
775 | 1600 |
776 StaticMarkingVisitor::VisitPointers(start_slot, end_slot); | 1601 StaticMarkingVisitor::VisitPointers(start_slot, end_slot, marker); |
777 } | 1602 } |
778 | 1603 |
779 | 1604 |
780 void MarkCompactCollector::MarkDescriptorArray( | 1605 void MarkCompactCollector::MarkDescriptorArray(DescriptorArray* descriptors, |
781 DescriptorArray* descriptors) { | 1606 Marker* marker) { |
782 if (descriptors->IsMarked()) return; | 1607 if (descriptors->IsMarked()) return; |
783 // Empty descriptor array is marked as a root before any maps are marked. | 1608 // Empty descriptor array is marked as a root before any maps are marked. |
784 ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array()); | 1609 ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array()); |
785 SetMark(descriptors); | 1610 |
| 1611 if (!marker->SetMark(descriptors)) return; |
786 | 1612 |
787 FixedArray* contents = reinterpret_cast<FixedArray*>( | 1613 FixedArray* contents = reinterpret_cast<FixedArray*>( |
788 descriptors->get(DescriptorArray::kContentArrayIndex)); | 1614 descriptors->get(DescriptorArray::kContentArrayIndex)); |
| 1615 |
| 1616 MARKING_INVARIANT_ST(!contents->IsMarked() && contents->IsFixedArray()); |
| 1617 |
| 1618 if (!marker->SetMark(contents)) return; |
| 1619 |
789 ASSERT(contents->IsHeapObject()); | 1620 ASSERT(contents->IsHeapObject()); |
790 ASSERT(!contents->IsMarked()); | |
791 ASSERT(contents->IsFixedArray()); | |
792 ASSERT(contents->length() >= 2); | 1621 ASSERT(contents->length() >= 2); |
793 SetMark(contents); | 1622 |
794 // Contents contains (value, details) pairs. If the details say that | 1623 // Contents contains (value, details) pairs. If the details say that |
795 // the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, or | 1624 // the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, or |
796 // NULL_DESCRIPTOR, we don't mark the value as live. Only for | 1625 // NULL_DESCRIPTOR, we don't mark the value as live. Only for |
797 // MAP_TRANSITION and CONSTANT_TRANSITION is the value an Object* (a | 1626 // MAP_TRANSITION and CONSTANT_TRANSITION is the value an Object* (a |
798 // Map*). | 1627 // Map*). |
799 for (int i = 0; i < contents->length(); i += 2) { | 1628 for (int i = 0; i < contents->length(); i += 2) { |
800 // If the pair (value, details) at index i, i+1 is not | 1629 // If the pair (value, details) at index i, i+1 is not |
801 // a transition or null descriptor, mark the value. | 1630 // a transition or null descriptor, mark the value. |
802 PropertyDetails details(Smi::cast(contents->get(i + 1))); | 1631 PropertyDetails details(Smi::cast(contents->get(i + 1))); |
803 if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) { | 1632 if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) { |
804 HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i)); | 1633 HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i)); |
805 if (object->IsHeapObject() && !object->IsMarked()) { | 1634 if (object->IsHeapObject()) marker->ClaimObject(object); |
806 SetMark(object); | |
807 marking_stack.Push(object); | |
808 } | |
809 } | 1635 } |
810 } | 1636 } |
811 // The DescriptorArray descriptors contains a pointer to its contents array, | 1637 // The DescriptorArray descriptors contains a pointer to its contents array, |
812 // but the contents array is already marked. | 1638 // but the contents array is already marked. |
813 marking_stack.Push(descriptors); | 1639 marker->Push(descriptors); |
814 } | 1640 } |
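The loop in MarkDescriptorArray walks the contents array two slots at a time: even slots hold values, odd slots hold Smi-encoded PropertyDetails, and only non-transition entries get their value marked. A flat sketch of the same walk, with each (value, details) pair reshaped into one struct and a hypothetical cutoff constant:

#include <cstddef>

const int kFirstPhantomType = 4;  // hypothetical cutoff for transition types

struct Slot { void* value; int details; };

// Mark only values that belong to real properties, not transitions.
void MarkDescriptorValues(Slot* contents, size_t n, void (*mark)(void*)) {
  for (size_t i = 0; i < n; i++) {
    if (contents[i].details < kFirstPhantomType) mark(contents[i].value);
  }
}

static int marked_count = 0;
static void CountMark(void*) { marked_count++; }

int main() {
  Slot contents[2] = { { 0, 1 }, { 0, 7 } };  // one property, one transition
  MarkDescriptorValues(contents, 2, CountMark);
  return marked_count == 1 ? 0 : 1;  // exits 0
}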
815 | 1641 |
816 | 1642 |
817 void MarkCompactCollector::CreateBackPointers() { | 1643 void MarkCompactCollector::CreateBackPointers() { |
818 HeapObjectIterator iterator(Heap::map_space()); | 1644 HeapObjectIterator iterator(Heap::map_space()); |
819 for (HeapObject* next_object = iterator.next(); | 1645 for (HeapObject* next_object = iterator.next(); |
820 next_object != NULL; next_object = iterator.next()) { | 1646 next_object != NULL; next_object = iterator.next()) { |
821 if (next_object->IsMap()) { // Could also be ByteArray on free list. | 1647 if (next_object->IsMap()) { // Could also be ByteArray on free list. |
822 Map* map = Map::cast(next_object); | 1648 Map* map = Map::cast(next_object); |
823 if (map->instance_type() >= FIRST_JS_OBJECT_TYPE && | 1649 if (map->instance_type() >= FIRST_JS_OBJECT_TYPE && |
824 map->instance_type() <= JS_FUNCTION_TYPE) { | 1650 map->instance_type() <= JS_FUNCTION_TYPE) { |
825 map->CreateBackPointers(); | 1651 map->CreateBackPointers(); |
826 } else { | 1652 } else { |
827 ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array()); | 1653 ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array()); |
828 } | 1654 } |
829 } | 1655 } |
830 } | 1656 } |
831 } | 1657 } |
832 | 1658 |
833 | 1659 |
834 static int OverflowObjectSize(HeapObject* obj) { | |
835 // Recover the normal map pointer; it might be marked as live and |
836 // overflowed. | |
837 MapWord map_word = obj->map_word(); | |
838 map_word.ClearMark(); | |
839 map_word.ClearOverflow(); | |
840 return obj->SizeFromMap(map_word.ToMap()); | |
841 } | |
842 | |
843 | |
844 // Fill the marking stack with overflowed objects returned by the given | |
845 // iterator. Stop when the marking stack is filled or the end of the space | |
846 // is reached, whichever comes first. | |
847 template<class T> | |
848 static void ScanOverflowedObjects(T* it) { | |
849 // The caller should ensure that the marking stack is initially not full, | |
850 // so that we don't waste effort pointlessly scanning for objects. | |
851 ASSERT(!marking_stack.is_full()); | |
852 | |
853 for (HeapObject* object = it->next(); object != NULL; object = it->next()) { | |
854 if (object->IsOverflowed()) { | |
855 object->ClearOverflow(); | |
856 ASSERT(object->IsMarked()); | |
857 ASSERT(Heap::Contains(object)); | |
858 marking_stack.Push(object); | |
859 if (marking_stack.is_full()) return; | |
860 } | |
861 } | |
862 } | |
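Context for ScanOverflowedObjects: the marking stack lives in new space's from-space, so its capacity is fixed. When a push would overflow, the object is instead left with an overflow bit set, and RefillMarkingStack later sweeps each space to re-push such objects. A self-contained sketch of the bounded-stack half of that protocol (the per-object overflow bit is simplified here to a single flag on the stack):

#include <cassert>

template <class T, int kCapacity>
class BoundedMarkingStack {
 public:
  BoundedMarkingStack() : top_(0), overflowed_(false) {}
  void Push(T value) {
    if (top_ == kCapacity) { overflowed_ = true; return; }  // drop and flag
    items_[top_++] = value;
  }
  T Pop() { assert(top_ > 0); return items_[--top_]; }
  bool is_empty() const { return top_ == 0; }
  bool is_full() const { return top_ == kCapacity; }
  bool overflowed() const { return overflowed_; }
  void clear_overflowed() { overflowed_ = false; }
 private:
  T items_[kCapacity];
  int top_;
  bool overflowed_;
};

int main() {
  BoundedMarkingStack<int, 2> stack;
  stack.Push(1); stack.Push(2); stack.Push(3);  // third push overflows
  assert(stack.is_full() && stack.overflowed());
}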
863 | |
864 | |
865 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) { | 1660 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) { |
866 return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked(); | 1661 return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked(); |
867 } | 1662 } |
868 | 1663 |
869 | 1664 |
870 void MarkCompactCollector::MarkSymbolTable() { | 1665 void MarkCompactCollector::MarkRoots() { |
871 SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table(); | 1666 // Mark the heap roots including global variables, stack variables, |
872 // Mark the symbol table itself. | 1667 // etc., and all objects reachable from them. |
873 SetMark(symbol_table); | 1668 Marker::MarkRootsPar(); |
874 // Explicitly mark the prefix. | |
875 MarkingVisitor marker; | |
876 symbol_table->IteratePrefix(&marker); | |
877 ProcessMarkingStack(); | |
878 } | 1669 } |
879 | 1670 |
880 | 1671 |
881 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) { | |
882 // Mark the heap roots including global variables, stack variables, | |
883 // etc., and all objects reachable from them. | |
884 Heap::IterateStrongRoots(visitor, VISIT_ONLY_STRONG); | |
885 | 1672 |
886 // Handle the symbol table specially. | 1673 bool Marker::MarkObjectGroups() { |
887 MarkSymbolTable(); | |
888 | |
889 // There may be overflowed objects in the heap. Visit them now. | |
890 while (marking_stack.overflowed()) { | |
891 RefillMarkingStack(); | |
892 EmptyMarkingStack(); | |
893 } | |
894 } | |
895 | |
896 | |
897 void MarkCompactCollector::MarkObjectGroups() { | |
898 List<ObjectGroup*>* object_groups = GlobalHandles::ObjectGroups(); | 1674 List<ObjectGroup*>* object_groups = GlobalHandles::ObjectGroups(); |
899 | 1675 |
900 for (int i = 0; i < object_groups->length(); i++) { | 1676 bool marked = false; |
| 1677 |
| 1678 for (int i = id_; i < object_groups->length(); i++) { |
901 ObjectGroup* entry = object_groups->at(i); | 1679 ObjectGroup* entry = object_groups->at(i); |
902 if (entry == NULL) continue; | 1680 if (entry == NULL) continue; |
903 | 1681 |
904 List<Object**>& objects = entry->objects_; | 1682 List<Object**>& objects = entry->objects_; |
905 bool group_marked = false; | 1683 bool group_marked = false; |
906 for (int j = 0; j < objects.length(); j++) { | 1684 for (int j = 0; j < objects.length(); j++) { |
907 Object* object = *objects[j]; | 1685 Object* object = *objects[j]; |
908 if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) { | 1686 if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) { |
909 group_marked = true; | 1687 group_marked = true; |
910 break; | 1688 break; |
911 } | 1689 } |
912 } | 1690 } |
913 | 1691 |
914 if (!group_marked) continue; | 1692 if (!group_marked) continue; |
915 | 1693 |
916 // An object in the group is marked, so mark as gray all white heap | 1694 // An object in the group is marked, so mark as gray all white heap |
917 // objects in the group. | 1695 // objects in the group. |
918 for (int j = 0; j < objects.length(); ++j) { | 1696 for (int j = 0; j < objects.length(); ++j) { |
919 if ((*objects[j])->IsHeapObject()) { | 1697 HeapObject* object = reinterpret_cast<HeapObject*>(*objects[j]); |
920 MarkObject(HeapObject::cast(*objects[j])); | 1698 if (object->IsHeapObject()) { |
| 1699 marked = marked || !object->IsMarked(); |
| 1700 ClaimObject(HeapObject::cast(*objects[j])); |
921 } | 1701 } |
922 } | 1702 } |
| 1703 |
923 // Once the entire group has been colored gray, set the object group | 1704 // Once the entire group has been colored gray, set the object group |
924 // to NULL so it won't be processed again. | 1705 // to NULL so it won't be processed again. |
925 delete object_groups->at(i); | 1706 delete object_groups->at(i); |
926 object_groups->at(i) = NULL; | 1707 object_groups->at(i) = NULL; |
927 } | 1708 } |
| 1709 |
| 1710 return marked; |
928 } | 1711 } |
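An object group encodes an all-or-nothing liveness rule: if any member is already marked, every member must be treated as live. MarkObjectGroups applies exactly that rule and, in the new code, reports whether it marked anything so ProcessObjectGroups can iterate to a fixpoint. A standalone sketch of the rule (hypothetical Node type):

#include <vector>

struct Node { bool marked; };

// Returns true if this call marked at least one previously-unmarked member.
bool MarkGroupIfAnyLive(std::vector<Node*>& group) {
  bool any_marked = false;
  for (size_t i = 0; i < group.size(); i++) {
    if (group[i]->marked) { any_marked = true; break; }
  }
  if (!any_marked) return false;
  bool newly_marked = false;
  for (size_t i = 0; i < group.size(); i++) {
    if (!group[i]->marked) { group[i]->marked = true; newly_marked = true; }
  }
  return newly_marked;
}

int main() {
  Node a = { true };
  Node b = { false };
  std::vector<Node*> group;
  group.push_back(&a);
  group.push_back(&b);
  return MarkGroupIfAnyLive(group) ? 0 : 1;  // marks b, exits 0
}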
929 | 1712 |
930 | 1713 |
931 // Mark all objects reachable from the objects on the marking stack. | 1714 bool Marker::ProcessObjectGroups() { |
932 // Before: the marking stack contains zero or more heap object pointers. | 1715 ASSERT(working_markers_ == 0); |
933 // After: the marking stack is empty, and all objects reachable from the | 1716 ASSERT(id_ == kMainThreadMarkerId); |
934 // marking stack have been marked, or are overflowed in the heap. | |
935 void MarkCompactCollector::EmptyMarkingStack() { | |
936 while (!marking_stack.is_empty()) { | |
937 HeapObject* object = marking_stack.Pop(); | |
938 ASSERT(object->IsHeapObject()); | |
939 ASSERT(Heap::Contains(object)); | |
940 ASSERT(object->IsMarked()); | |
941 ASSERT(!object->IsOverflowed()); | |
942 | 1717 |
943 // Because the object is marked, we have to recover the original map | 1718 bool found_unprocessed_group = false; |
944 // pointer and use it to mark the object's body. | 1719 while (MarkObjectGroups()) { |
945 MapWord map_word = object->map_word(); | 1720 TransitiveClosure(); |
946 map_word.ClearMark(); | 1721 ProcessOverflowedObjects(); |
947 Map* map = map_word.ToMap(); | 1722 found_unprocessed_group = true; |
948 MarkObject(map); | 1723 } |
949 | 1724 |
950 StaticMarkingVisitor::IterateBody(map, object); | 1725 return found_unprocessed_group; |
951 } | |
952 } | 1726 } |
953 | 1727 |
954 | 1728 |
955 // Sweep the heap for overflowed objects, clear their overflow bits, and | |
956 // push them on the marking stack. Stop early if the marking stack fills | |
957 // before sweeping completes. If sweeping completes, there are no remaining | |
958 // overflowed objects in the heap so the overflow flag on the markings stack | |
959 // is cleared. | |
960 void MarkCompactCollector::RefillMarkingStack() { | |
961 ASSERT(marking_stack.overflowed()); | |
962 | |
963 SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize); | |
964 ScanOverflowedObjects(&new_it); | |
965 if (marking_stack.is_full()) return; | |
966 | |
967 HeapObjectIterator old_pointer_it(Heap::old_pointer_space(), | |
968 &OverflowObjectSize); | |
969 ScanOverflowedObjects(&old_pointer_it); | |
970 if (marking_stack.is_full()) return; | |
971 | |
972 HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize); | |
973 ScanOverflowedObjects(&old_data_it); | |
974 if (marking_stack.is_full()) return; | |
975 | |
976 HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize); | |
977 ScanOverflowedObjects(&code_it); | |
978 if (marking_stack.is_full()) return; | |
979 | |
980 HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize); | |
981 ScanOverflowedObjects(&map_it); | |
982 if (marking_stack.is_full()) return; | |
983 | |
984 HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize); | |
985 ScanOverflowedObjects(&cell_it); | |
986 if (marking_stack.is_full()) return; | |
987 | |
988 LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize); | |
989 ScanOverflowedObjects(&lo_it); | |
990 if (marking_stack.is_full()) return; | |
991 | |
992 marking_stack.clear_overflowed(); | |
993 } | |
994 | |
995 | |
996 // Mark all objects reachable (transitively) from objects on the marking | |
997 // stack. Before: the marking stack contains zero or more heap object | |
998 // pointers. After: the marking stack is empty and there are no overflowed | |
999 // objects in the heap. | |
1000 void MarkCompactCollector::ProcessMarkingStack() { | |
1001 EmptyMarkingStack(); | |
1002 while (marking_stack.overflowed()) { | |
1003 RefillMarkingStack(); | |
1004 EmptyMarkingStack(); | |
1005 } | |
1006 } | |
1007 | |
1008 | |
1009 void MarkCompactCollector::ProcessObjectGroups() { | |
1010 bool work_to_do = true; | |
1011 ASSERT(marking_stack.is_empty()); | |
1012 while (work_to_do) { | |
1013 MarkObjectGroups(); | |
1014 work_to_do = !marking_stack.is_empty(); | |
1015 ProcessMarkingStack(); | |
1016 } | |
1017 } | |
1018 | |
1019 | |
1020 void MarkCompactCollector::MarkLiveObjects() { | 1729 void MarkCompactCollector::MarkLiveObjects() { |
1021 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK); | 1730 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK); |
1022 #ifdef DEBUG | 1731 #ifdef DEBUG |
1023 ASSERT(state_ == PREPARE_GC); | 1732 ASSERT(state_ == PREPARE_GC); |
1024 state_ = MARK_LIVE_OBJECTS; | 1733 state_ = MARK_LIVE_OBJECTS; |
1025 #endif | 1734 #endif |
1026 // The to space contains live objects, the from space is used as a marking | 1735 // The to space contains live objects, the from space is used as a marking |
1027 // stack. | 1736 // stack. |
1028 marking_stack.Initialize(Heap::new_space()->FromSpaceLow(), | 1737 Marker::Initialize(Heap::new_space()->FromSpaceLow(), |
1029 Heap::new_space()->FromSpaceHigh()); | 1738 Heap::new_space()->FromSpaceHigh()); |
1030 | 1739 |
1031 ASSERT(!marking_stack.overflowed()); | 1740 // Ensure that empty descriptor array is marked. Method MarkDescriptorArray |
| 1741 // relies on it being marked before any other descriptor array. |
| 1742 Marker::ClaimObjectByOneOfMarkers(Heap::raw_unchecked_empty_descriptor_array()); |
| 1743 |
| 1744 StaticMarkingVisitor::SelectMapVisitor(); |
1032 | 1745 |
1033 PrepareForCodeFlushing(); | 1746 PrepareForCodeFlushing(); |
1034 | 1747 |
1035 RootMarkingVisitor root_visitor; | 1748 MarkRoots(); |
1036 MarkRoots(&root_visitor); | |
1037 | 1749 |
1038 // The objects reachable from the roots are marked, yet unreachable | 1750 Marker::Sequential()->ProcessObjectGroups(); |
1039 // objects are unmarked. Mark objects reachable from object groups | |
1040 // containing at least one marked object, and continue until no new | |
1041 // objects are reachable from the object groups. | |
1042 ProcessObjectGroups(); | |
1043 | 1751 |
1044 // The objects reachable from the roots or object groups are marked, | 1752 // The objects reachable from the roots or object groups are marked, |
1045 // yet unreachable objects are unmarked. Mark objects reachable | 1753 // yet unreachable objects are unmarked. Mark objects reachable |
1046 // only from weak global handles. | 1754 // only from weak global handles. |
1047 // | 1755 // |
1048 // First we identify nonlive weak handles and mark them as pending | 1756 // First we identify nonlive weak handles and mark them as pending |
1049 // destruction. | 1757 // destruction. |
1050 GlobalHandles::IdentifyWeakHandles(&IsUnmarkedHeapObject); | 1758 GlobalHandles::IdentifyWeakHandles(&IsUnmarkedHeapObject); |
1051 // Then we mark the objects and process the transitive closure. | 1759 // Then we mark the objects and process the transitive closure. |
1052 GlobalHandles::IterateWeakRoots(&root_visitor); | 1760 GlobalHandles::IterateWeakRoots(Marker::Sequential()->root_visitor()); |
1053 while (marking_stack.overflowed()) { | 1761 Marker::Sequential()->ProcessOverflowedObjects(); |
1054 RefillMarkingStack(); | |
1055 EmptyMarkingStack(); | |
1056 } | |
1057 | 1762 |
1058 // Repeat the object groups to mark unmarked groups reachable from the | 1763 // Repeat the object groups to mark unmarked groups reachable from the |
1059 // weak roots. | 1764 // weak roots. |
1060 ProcessObjectGroups(); | 1765 Marker::Sequential()->ProcessObjectGroups(); |
1061 | 1766 |
1062 // Prune the symbol table removing all symbols only pointed to by the | 1767 // Prune the symbol table removing all symbols only pointed to by the |
1063 // symbol table. Cannot use symbol_table() here because the symbol | 1768 // symbol table. Cannot use symbol_table() here because the symbol |
1064 // table is marked. | 1769 // table is marked. |
1065 SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table(); | 1770 SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table(); |
1066 SymbolTableCleaner v; | 1771 SymbolTableCleaner v; |
1067 symbol_table->IterateElements(&v); | 1772 symbol_table->IterateElements(&v); |
1068 symbol_table->ElementsRemoved(v.PointersRemoved()); | 1773 symbol_table->ElementsRemoved(v.PointersRemoved()); |
1069 ExternalStringTable::Iterate(&v); | 1774 ExternalStringTable::Iterate(&v); |
1070 ExternalStringTable::CleanUp(); | 1775 ExternalStringTable::CleanUp(); |
1071 | 1776 |
1072 // Remove object groups after marking phase. | 1777 // Remove object groups after marking phase. |
1073 GlobalHandles::RemoveObjectGroups(); | 1778 GlobalHandles::RemoveObjectGroups(); |
1074 } | 1779 } |
1075 | 1780 |
1076 | 1781 |
1077 static int CountMarkedCallback(HeapObject* obj) { | 1782 static int CountMarkedCallback(HeapObject* obj) { |
1078 MapWord map_word = obj->map_word(); | 1783 MapWord map_word = obj->map_word(); |
1079 map_word.ClearMark(); | 1784 map_word.ClearMark(); |
1080 return obj->SizeFromMap(map_word.ToMap()); | 1785 return obj->SizeFromMap(map_word.ToMap()); |
1081 } | 1786 } |
1082 | 1787 |
1083 | 1788 |
1084 #ifdef DEBUG | 1789 #ifdef DEBUG |
| 1790 static int SafeSizeOf(HeapObject* obj) { |
| 1791 MapWord map_word = obj->map_word(); |
| 1792 map_word.ClearMark(); |
| 1793 map_word.ClearOverflow(); |
| 1794 return obj->SizeFromMap(map_word.ToMap()); |
| 1795 } |
| 1796 |
1085 void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) { | 1797 void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) { |
1086 live_bytes_ += obj->Size(); | 1798 const int size = SafeSizeOf(obj); |
| 1799 AtomicAdd(&live_bytes_, size); |
1087 if (Heap::new_space()->Contains(obj)) { | 1800 if (Heap::new_space()->Contains(obj)) { |
1088 live_young_objects_size_ += obj->Size(); | 1801 AtomicAdd(&live_young_objects_size_, size); |
1089 } else if (Heap::map_space()->Contains(obj)) { | 1802 } else if (Heap::map_space()->Contains(obj)) { |
1090 ASSERT(obj->IsMap()); | 1803 ASSERT(SafeIsMap(obj)); |
1091 live_map_objects_size_ += obj->Size(); | 1804 AtomicAdd(&live_map_objects_size_, size); |
1092 } else if (Heap::cell_space()->Contains(obj)) { | 1805 } else if (Heap::cell_space()->Contains(obj)) { |
1093 ASSERT(obj->IsJSGlobalPropertyCell()); | 1806 AtomicAdd(&live_cell_objects_size_, size); |
1094 live_cell_objects_size_ += obj->Size(); | |
1095 } else if (Heap::old_pointer_space()->Contains(obj)) { | 1807 } else if (Heap::old_pointer_space()->Contains(obj)) { |
1096 live_old_pointer_objects_size_ += obj->Size(); | 1808 AtomicAdd(&live_old_pointer_objects_size_, size); |
1097 } else if (Heap::old_data_space()->Contains(obj)) { | 1809 } else if (Heap::old_data_space()->Contains(obj)) { |
1098 live_old_data_objects_size_ += obj->Size(); | 1810 AtomicAdd(&live_old_data_objects_size_, size); |
1099 } else if (Heap::code_space()->Contains(obj)) { | 1811 } else if (Heap::code_space()->Contains(obj)) { |
1100 live_code_objects_size_ += obj->Size(); | 1812 AtomicAdd(&live_code_objects_size_, size); |
1101 } else if (Heap::lo_space()->Contains(obj)) { | 1813 } else if (Heap::lo_space()->Contains(obj)) { |
1102 live_lo_objects_size_ += obj->Size(); | 1814 AtomicAdd(&live_lo_objects_size_, size); |
1103 } else { | 1815 } else { |
1104 UNREACHABLE(); | 1816 UNREACHABLE(); |
1105 } | 1817 } |
1106 } | 1818 } |
1107 #endif // DEBUG | 1819 #endif // DEBUG |
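With several markers updating the debug counters concurrently, plain += would lose updates, hence the switch to AtomicAdd in UpdateLiveObjectCount above. The patch presumably defines AtomicAdd as a platform primitive elsewhere; a modern stand-in using <atomic>, for illustration only:

#include <atomic>

static std::atomic<int> live_bytes_counter(0);

// Same intent as the AtomicAdd calls above: a lost-update-free add.
void AtomicAddSketch(std::atomic<int>* counter, int delta) {
  counter->fetch_add(delta, std::memory_order_relaxed);
}

int main() {
  AtomicAddSketch(&live_bytes_counter, 64);
  return live_bytes_counter.load() == 64 ? 0 : 1;  // exits 0
}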
1108 | 1820 |
1109 | 1821 |
1110 void MarkCompactCollector::SweepLargeObjectSpace() { | 1822 void MarkCompactCollector::SweepLargeObjectSpace() { |
1111 #ifdef DEBUG | 1823 #ifdef DEBUG |
1112 ASSERT(state_ == MARK_LIVE_OBJECTS); | 1824 ASSERT(state_ == MARK_LIVE_OBJECTS); |
1113 state_ = | 1825 state_ = |
1114 compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES; | 1826 compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES; |
1115 #endif | 1827 #endif |
1116 // Deallocate unmarked objects and clear marked bits for marked objects. | 1828 // Deallocate unmarked objects and clear marked bits for marked objects. |
1117 Heap::lo_space()->FreeUnmarkedObjects(); | 1829 Heap::lo_space()->FreeUnmarkedObjects(); |
1118 } | 1830 } |
1119 | 1831 |
1120 | 1832 |
1121 // Safe to use during marking phase only. | 1833 // Safe to use during marking phase only. |
1122 bool MarkCompactCollector::SafeIsMap(HeapObject* object) { | 1834 bool MarkCompactCollector::SafeIsMap(HeapObject* object) { |
1123 MapWord metamap = object->map_word(); | 1835 MapWord metamap = object->map_word(); |
1124 metamap.ClearMark(); | 1836 metamap.ClearMark(); |
| 1837 metamap.ClearOverflow(); |
1125 return metamap.ToMap()->instance_type() == MAP_TYPE; | 1838 return metamap.ToMap()->instance_type() == MAP_TYPE; |
1126 } | 1839 } |
1127 | 1840 |
1128 | 1841 |
1129 void MarkCompactCollector::ClearNonLiveTransitions() { | 1842 void MarkCompactCollector::ClearNonLiveTransitions() { |
1130 HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback); | 1843 HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback); |
1131 // Iterate over the map space, setting map transitions that go from | 1844 // Iterate over the map space, setting map transitions that go from |
1132 // a marked map to an unmarked map to null transitions. At the same time, | 1845 // a marked map to an unmarked map to null transitions. At the same time, |
1133 // set all the prototype fields of maps back to their original value, | 1846 // set all the prototype fields of maps back to their original value, |
1134 // dropping the back pointers temporarily stored in the prototype field. | 1847 // dropping the back pointers temporarily stored in the prototype field. |
(...skipping 210 matching lines...) |
1345 // A flag giving the state of the previously swept object. Initially true | 2058 // A flag giving the state of the previously swept object. Initially true |
1346 // to ensure that free_start is initialized to a proper address before | 2059 // to ensure that free_start is initialized to a proper address before |
1347 // trying to write to it. | 2060 // trying to write to it. |
1348 bool is_prev_alive = true; | 2061 bool is_prev_alive = true; |
1349 | 2062 |
1350 int object_size; // Will be set on each iteration of the loop. | 2063 int object_size; // Will be set on each iteration of the loop. |
1351 for (Address current = start; current < end; current += object_size) { | 2064 for (Address current = start; current < end; current += object_size) { |
1352 HeapObject* object = HeapObject::FromAddress(current); | 2065 HeapObject* object = HeapObject::FromAddress(current); |
1353 if (object->IsMarked()) { | 2066 if (object->IsMarked()) { |
1354 object->ClearMark(); | 2067 object->ClearMark(); |
| 2068 #ifdef DEBUG |
1355 MarkCompactCollector::tracer()->decrement_marked_count(); | 2069 MarkCompactCollector::tracer()->decrement_marked_count(); |
| 2070 #endif |
1356 object_size = object->Size(); | 2071 object_size = object->Size(); |
1357 | 2072 |
1358 Object* forwarded = Alloc(object, object_size); | 2073 Object* forwarded = Alloc(object, object_size); |
1359 // Allocation cannot fail, because we are compacting the space. | 2074 // Allocation cannot fail, because we are compacting the space. |
1360 ASSERT(!forwarded->IsFailure()); | 2075 ASSERT(!forwarded->IsFailure()); |
1361 Encode(object, object_size, forwarded, offset); | 2076 Encode(object, object_size, forwarded, offset); |
1362 | 2077 |
1363 #ifdef DEBUG | 2078 #ifdef DEBUG |
1364 if (FLAG_gc_verbose) { | 2079 if (FLAG_gc_verbose) { |
1365 PrintF("forward %p -> %p.\n", object->address(), | 2080 PrintF("forward %p -> %p.\n", object->address(), |
(...skipping 204 matching lines...) |
1570 int size = 0; | 2285 int size = 0; |
1571 int survivors_size = 0; | 2286 int survivors_size = 0; |
1572 | 2287 |
1573 // First pass: traverse all objects in inactive semispace, remove marks, | 2288 // First pass: traverse all objects in inactive semispace, remove marks, |
1574 // migrate live objects and write forwarding addresses. | 2289 // migrate live objects and write forwarding addresses. |
1575 for (Address current = from_bottom; current < from_top; current += size) { | 2290 for (Address current = from_bottom; current < from_top; current += size) { |
1576 HeapObject* object = HeapObject::FromAddress(current); | 2291 HeapObject* object = HeapObject::FromAddress(current); |
1577 | 2292 |
1578 if (object->IsMarked()) { | 2293 if (object->IsMarked()) { |
1579 object->ClearMark(); | 2294 object->ClearMark(); |
| 2295 #ifdef DEBUG |
1580 MarkCompactCollector::tracer()->decrement_marked_count(); | 2296 MarkCompactCollector::tracer()->decrement_marked_count(); |
| 2297 #endif |
| 2298 |
| 2299 ASSERT(object->map()->IsMarked()); |
1581 | 2300 |
1582 size = object->Size(); | 2301 size = object->Size(); |
1583 survivors_size += size; | 2302 survivors_size += size; |
1584 | 2303 |
1585 // Aggressively promote young survivors to the old space. | 2304 // Aggressively promote young survivors to the old space. |
1586 if (TryPromoteObject(object, size)) { | 2305 if (TryPromoteObject(object, size)) { |
1587 continue; | 2306 continue; |
1588 } | 2307 } |
1589 | 2308 |
1590 // Promotion failed. Just migrate object to another semispace. | 2309 // Promotion failed. Just migrate object to another semispace. |
(...skipping 89 matching lines...) |
1680 bool is_previous_alive = true; | 2399 bool is_previous_alive = true; |
1681 Address free_start = NULL; | 2400 Address free_start = NULL; |
1682 HeapObject* object; | 2401 HeapObject* object; |
1683 | 2402 |
1684 for (Address current = p->ObjectAreaStart(); | 2403 for (Address current = p->ObjectAreaStart(); |
1685 current < p->AllocationTop(); | 2404 current < p->AllocationTop(); |
1686 current += object->Size()) { | 2405 current += object->Size()) { |
1687 object = HeapObject::FromAddress(current); | 2406 object = HeapObject::FromAddress(current); |
1688 if (object->IsMarked()) { | 2407 if (object->IsMarked()) { |
1689 object->ClearMark(); | 2408 object->ClearMark(); |
| 2409 #ifdef DEBUG |
1690 MarkCompactCollector::tracer()->decrement_marked_count(); | 2410 MarkCompactCollector::tracer()->decrement_marked_count(); |
1691 | 2411 #endif |
1692 if (!is_previous_alive) { // Transition from free to live. | 2412 if (!is_previous_alive) { // Transition from free to live. |
1693 space->DeallocateBlock(free_start, | 2413 space->DeallocateBlock(free_start, |
1694 static_cast<int>(current - free_start), | 2414 static_cast<int>(current - free_start), |
1695 true); | 2415 true); |
1696 is_previous_alive = true; | 2416 is_previous_alive = true; |
1697 } | 2417 } |
1698 } else { | 2418 } else { |
1699 MarkCompactCollector::ReportDeleteIfNeeded(object); | 2419 MarkCompactCollector::ReportDeleteIfNeeded(object); |
1700 if (is_previous_alive) { // Transition from live to free. | 2420 if (is_previous_alive) { // Transition from live to free. |
1701 free_start = current; | 2421 free_start = current; |
(...skipping 363 matching lines...) |
2065 } | 2785 } |
2066 SweepSpace(Heap::map_space()); | 2786 SweepSpace(Heap::map_space()); |
2067 | 2787 |
2068 Heap::IterateDirtyRegions(Heap::map_space(), | 2788 Heap::IterateDirtyRegions(Heap::map_space(), |
2069 &Heap::IteratePointersInDirtyMapsRegion, | 2789 &Heap::IteratePointersInDirtyMapsRegion, |
2070 &UpdatePointerToNewGen, | 2790 &UpdatePointerToNewGen, |
2071 Heap::WATERMARK_SHOULD_BE_VALID); | 2791 Heap::WATERMARK_SHOULD_BE_VALID); |
2072 | 2792 |
2073 intptr_t live_maps_size = Heap::map_space()->Size(); | 2793 intptr_t live_maps_size = Heap::map_space()->Size(); |
2074 int live_maps = static_cast<int>(live_maps_size / Map::kSize); | 2794 int live_maps = static_cast<int>(live_maps_size / Map::kSize); |
2075 ASSERT(live_map_objects_size_ == live_maps_size); | 2795 MARKING_INVARIANT_ST(live_map_objects_size_ == live_maps_size); |
2076 | 2796 |
2077 if (Heap::map_space()->NeedsCompaction(live_maps)) { | 2797 if (Heap::map_space()->NeedsCompaction(live_maps)) { |
2078 MapCompact map_compact(live_maps); | 2798 MapCompact map_compact(live_maps); |
2079 | 2799 |
2080 map_compact.CompactMaps(); | 2800 map_compact.CompactMaps(); |
2081 map_compact.UpdateMapPointersInRoots(); | 2801 map_compact.UpdateMapPointersInRoots(); |
2082 | 2802 |
2083 PagedSpaces spaces; | 2803 PagedSpaces spaces; |
2084 for (PagedSpace* space = spaces.next(); | 2804 for (PagedSpace* space = spaces.next(); |
2085 space != NULL; space = spaces.next()) { | 2805 space != NULL; space = spaces.next()) { |
(...skipping 176 matching lines...) |
2262 LargeObjectIterator it(Heap::lo_space()); | 2982 LargeObjectIterator it(Heap::lo_space()); |
2263 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) | 2983 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) |
2264 UpdatePointersInNewObject(obj); | 2984 UpdatePointersInNewObject(obj); |
2265 | 2985 |
2266 USE(live_maps_size); | 2986 USE(live_maps_size); |
2267 USE(live_pointer_olds_size); | 2987 USE(live_pointer_olds_size); |
2268 USE(live_data_olds_size); | 2988 USE(live_data_olds_size); |
2269 USE(live_codes_size); | 2989 USE(live_codes_size); |
2270 USE(live_cells_size); | 2990 USE(live_cells_size); |
2271 USE(live_news_size); | 2991 USE(live_news_size); |
2272 ASSERT(live_maps_size == live_map_objects_size_); | 2992 MARKING_INVARIANT_ST(live_maps_size == live_map_objects_size_); |
2273 ASSERT(live_data_olds_size == live_old_data_objects_size_); | 2993 MARKING_INVARIANT_ST(live_data_olds_size == live_old_data_objects_size_); |
2274 ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_); | 2994 MARKING_INVARIANT_ST(live_pointer_olds_size == |
2275 ASSERT(live_codes_size == live_code_objects_size_); | 2995 live_old_pointer_objects_size_); |
2276 ASSERT(live_cells_size == live_cell_objects_size_); | 2996 MARKING_INVARIANT_ST(live_codes_size == live_code_objects_size_); |
2277 ASSERT(live_news_size == live_young_objects_size_); | 2997 MARKING_INVARIANT_ST(live_cells_size == live_cell_objects_size_); |
| 2998 MARKING_INVARIANT_ST(live_news_size == live_young_objects_size_); |
2278 } | 2999 } |
2279 | 3000 |
2280 | 3001 |
2281 int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) { | 3002 int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) { |
2282 // Keep old map pointers | 3003 // Keep old map pointers |
2283 Map* old_map = obj->map(); | 3004 Map* old_map = obj->map(); |
2284 ASSERT(old_map->IsHeapObject()); | 3005 ASSERT(old_map->IsHeapObject()); |
2285 | 3006 |
2286 Address forwarded = GetForwardingAddressInOldSpace(old_map); | 3007 Address forwarded = GetForwardingAddressInOldSpace(old_map); |
2287 | 3008 |
(...skipping 112 matching lines...) |
2400 &RelocateCellObject); | 3121 &RelocateCellObject); |
2401 int live_news_size = IterateLiveObjects(Heap::new_space(), | 3122 int live_news_size = IterateLiveObjects(Heap::new_space(), |
2402 &RelocateNewObject); | 3123 &RelocateNewObject); |
2403 | 3124 |
2404 USE(live_maps_size); | 3125 USE(live_maps_size); |
2405 USE(live_pointer_olds_size); | 3126 USE(live_pointer_olds_size); |
2406 USE(live_data_olds_size); | 3127 USE(live_data_olds_size); |
2407 USE(live_codes_size); | 3128 USE(live_codes_size); |
2408 USE(live_cells_size); | 3129 USE(live_cells_size); |
2409 USE(live_news_size); | 3130 USE(live_news_size); |
2410 ASSERT(live_maps_size == live_map_objects_size_); | 3131 |
2411 ASSERT(live_data_olds_size == live_old_data_objects_size_); | 3132 MARKING_INVARIANT_ST(live_maps_size == live_map_objects_size_); |
2412 ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_); | 3133 MARKING_INVARIANT_ST(live_data_olds_size == live_old_data_objects_size_); |
2413 ASSERT(live_codes_size == live_code_objects_size_); | 3134 MARKING_INVARIANT_ST( |
2414 ASSERT(live_cells_size == live_cell_objects_size_); | 3135 live_pointer_olds_size == live_old_pointer_objects_size_); |
2415 ASSERT(live_news_size == live_young_objects_size_); | 3136 MARKING_INVARIANT_ST(live_codes_size == live_code_objects_size_); |
| 3137 MARKING_INVARIANT_ST(live_cells_size == live_cell_objects_size_); |
| 3138 MARKING_INVARIANT_ST(live_news_size == live_young_objects_size_); |
2416 | 3139 |
2417 // Flip from and to spaces | 3140 // Flip from and to spaces |
2418 Heap::new_space()->Flip(); | 3141 Heap::new_space()->Flip(); |
2419 | 3142 |
2420 Heap::new_space()->MCCommitRelocationInfo(); | 3143 Heap::new_space()->MCCommitRelocationInfo(); |
2421 | 3144 |
2422 // Set age_mark to bottom in to space | 3145 // Set age_mark to bottom in to space |
2423 Address mark = Heap::new_space()->bottom(); | 3146 Address mark = Heap::new_space()->bottom(); |
2424 Heap::new_space()->set_age_mark(mark); | 3147 Heap::new_space()->set_age_mark(mark); |
2425 | 3148 |
(...skipping 208 matching lines...) |
2634 } | 3357 } |
2635 | 3358 |
2636 | 3359 |
2637 void MarkCompactCollector::Initialize() { | 3360 void MarkCompactCollector::Initialize() { |
2638 StaticPointersToNewGenUpdatingVisitor::Initialize(); | 3361 StaticPointersToNewGenUpdatingVisitor::Initialize(); |
2639 StaticMarkingVisitor::Initialize(); | 3362 StaticMarkingVisitor::Initialize(); |
2640 } | 3363 } |
2641 | 3364 |
2642 | 3365 |
2643 } } // namespace v8::internal | 3366 } } // namespace v8::internal |