OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 277 matching lines...)
288 } | 288 } |
289 | 289 |
290 ~SlotsBuffer() { | 290 ~SlotsBuffer() { |
291 } | 291 } |
292 | 292 |
293 void Add(ObjectSlot slot) { | 293 void Add(ObjectSlot slot) { |
294 ASSERT(0 <= idx_ && idx_ < kNumberOfElements); | 294 ASSERT(0 <= idx_ && idx_ < kNumberOfElements); |
295 slots_[idx_++] = slot; | 295 slots_[idx_++] = slot; |
296 } | 296 } |
297 | 297 |
298 void UpdateSlots(); | 298 enum SlotType { |
| 299 NONE, |
| 300 RELOCATED_CODE_OBJECT, |
| 301 CODE_TARGET_SLOT, |
| 302 CODE_ENTRY_SLOT, |
| 303 DEBUG_TARGET_SLOT, |
| 304 JS_RETURN_SLOT, |
| 305 NUMBER_OF_SLOT_TYPES |
| 306 }; |
| 307 |
| 308 SlotType UpdateSlots(Heap* heap, SlotType pending); |
299 | 309 |
300 SlotsBuffer* next() { return next_; } | 310 SlotsBuffer* next() { return next_; } |
301 | 311 |
302 static int SizeOfChain(SlotsBuffer* buffer) { | 312 static int SizeOfChain(SlotsBuffer* buffer) { |
303 if (buffer == NULL) return 0; | 313 if (buffer == NULL) return 0; |
304 return static_cast<int>(buffer->idx_ + | 314 return static_cast<int>(buffer->idx_ + |
305 (buffer->chain_length_ - 1) * kNumberOfElements); | 315 (buffer->chain_length_ - 1) * kNumberOfElements); |
306 } | 316 } |
307 | 317 |
308 inline bool IsFull() { | 318 inline bool IsFull() { |
309 return idx_ == kNumberOfElements; | 319 return idx_ == kNumberOfElements; |
310 } | 320 } |
311 | 321 |
312 static void UpdateSlotsRecordedIn(SlotsBuffer* buffer) { | 322 static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer) { |
| 323 SlotType pending = NONE; |
313 while (buffer != NULL) { | 324 while (buffer != NULL) { |
314 buffer->UpdateSlots(); | 325 pending = buffer->UpdateSlots(heap, pending); |
315 buffer = buffer->next(); | 326 buffer = buffer->next(); |
316 } | 327 } |
317 } | 328 } |
318 | 329 |
319 enum AdditionMode { | 330 enum AdditionMode { |
320 FAIL_ON_OVERFLOW, | 331 FAIL_ON_OVERFLOW, |
321 IGNORE_OVERFLOW | 332 IGNORE_OVERFLOW |
322 }; | 333 }; |
323 | 334 |
324 static bool AddTo(SlotsBufferAllocator* allocator, | 335 static bool AddTo(SlotsBufferAllocator* allocator, |
325 SlotsBuffer** buffer_address, | 336 SlotsBuffer** buffer_address, |
326 ObjectSlot slot, | 337 ObjectSlot slot, |
327 AdditionMode mode) { | 338 AdditionMode mode) { |
328 SlotsBuffer* buffer = *buffer_address; | 339 SlotsBuffer* buffer = *buffer_address; |
329 if (buffer == NULL || buffer->IsFull()) { | 340 if (buffer == NULL || buffer->IsFull()) { |
330 if (mode == FAIL_ON_OVERFLOW && | 341 if (mode == FAIL_ON_OVERFLOW && |
331 buffer != NULL && | 342 buffer != NULL && |
332 buffer->chain_length_ >= kChainLengthThreshold) { | 343 buffer->chain_length_ >= kChainLengthThreshold) { |
333 allocator->DeallocateChain(buffer_address); | 344 allocator->DeallocateChain(buffer_address); |
334 return false; | 345 return false; |
335 } | 346 } |
336 buffer = allocator->AllocateBuffer(buffer); | 347 buffer = allocator->AllocateBuffer(buffer); |
337 *buffer_address = buffer; | 348 *buffer_address = buffer; |
338 } | 349 } |
339 buffer->Add(slot); | 350 buffer->Add(slot); |
340 return true; | 351 return true; |
341 } | 352 } |
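A note on the two addition modes, since only FAIL_ON_OVERFLOW is exercised in this hunk: the failing mode is the speculative path that gives up once a page's chain passes kChainLengthThreshold buffers, while IGNORE_OVERFLOW is meant for slots that must not be lost. A rough caller-side sketch from the collector, assuming a dedicated buffer for slots created while copying objects (the field name migration_slots_buffer_ is an assumption, it is not part of this hunk):

    // Sketch: recording a slot written during object migration. This slot
    // cannot be rediscovered later, so the chain is allowed to keep growing
    // (IGNORE_OVERFLOW) instead of bailing out and evicting the candidate.
    if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
      SlotsBuffer::AddTo(&slots_buffer_allocator_,
                         &migration_slots_buffer_,  // assumed collector field
                         reinterpret_cast<Object**>(dst_slot),
                         SlotsBuffer::IGNORE_OVERFLOW);
    }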
342 | 353 |
| 354 static bool IsTypedSlot(ObjectSlot slot); |
| 355 |
| 356 static bool AddTo(SlotsBufferAllocator* allocator, |
| 357 SlotsBuffer** buffer_address, |
| 358 SlotType type, |
| 359 Address addr, |
| 360 AdditionMode mode); |
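IsTypedSlot and the typed AddTo overload are only declared here; their definitions are presumably in mark-compact.cc, which is not part of this hunk. A plausible sketch of the encoding they suggest, assuming a typed slot is stored as two consecutive entries, a small-integer type tag followed by the raw address, both squeezed through ObjectSlot:

    // Type tags are small integers (< NUMBER_OF_SLOT_TYPES), so they can
    // never collide with a real slot address.
    bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
      return reinterpret_cast<intptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
    }

    bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
                            SlotsBuffer** buffer_address,
                            SlotType type,
                            Address addr,
                            AdditionMode mode) {
      SlotsBuffer* buffer = *buffer_address;
      if (buffer == NULL || buffer->IsFull()) {
        if (mode == FAIL_ON_OVERFLOW &&
            buffer != NULL &&
            buffer->chain_length_ >= kChainLengthThreshold) {
          allocator->DeallocateChain(buffer_address);
          return false;
        }
        buffer = allocator->AllocateBuffer(buffer);
        *buffer_address = buffer;
      }
      // Record the type tag first. If the buffer fills up here, the matching
      // address lands in the next buffer of the chain, which appears to be
      // what the 'pending' value threaded through UpdateSlots covers.
      buffer->Add(reinterpret_cast<ObjectSlot>(type));
      if (buffer->IsFull()) {
        buffer = allocator->AllocateBuffer(buffer);
        *buffer_address = buffer;
      }
      buffer->Add(reinterpret_cast<ObjectSlot>(addr));
      return true;
    }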
| 361 |
343 static const int kNumberOfElements = 1021; | 362 static const int kNumberOfElements = 1021; |
344 | 363 |
345 private: | 364 private: |
346 static const int kChainLengthThreshold = 6; | 365 static const int kChainLengthThreshold = 6; |
347 | 366 |
348 intptr_t idx_; | 367 intptr_t idx_; |
349 intptr_t chain_length_; | 368 intptr_t chain_length_; |
350 SlotsBuffer* next_; | 369 SlotsBuffer* next_; |
351 ObjectSlot slots_[kNumberOfElements]; | 370 ObjectSlot slots_[kNumberOfElements]; |
352 }; | 371 }; |
(...skipping 95 matching lines...)
448 | 467 |
449 // Sweep a single page from the given space conservatively. | 468 // Sweep a single page from the given space conservatively. |
450 // Return the number of reclaimed bytes. | 469 // Return the number of reclaimed bytes. |
451 static intptr_t SweepConservatively(PagedSpace* space, Page* p); | 470 static intptr_t SweepConservatively(PagedSpace* space, Page* p); |
452 | 471 |
453 INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) { | 472 INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) { |
454 return Page::FromAddress(reinterpret_cast<Address>(anchor))-> | 473 return Page::FromAddress(reinterpret_cast<Address>(anchor))-> |
455 ShouldSkipEvacuationSlotRecording(); | 474 ShouldSkipEvacuationSlotRecording(); |
456 } | 475 } |
457 | 476 |
| 477 INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) { |
| 478 return Page::FromAddress(reinterpret_cast<Address>(host))-> |
| 479 ShouldSkipEvacuationSlotRecording(); |
| 480 } |
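The host-based ShouldSkipEvacuationSlotRecording overload above is presumably what RecordRelocSlot (declared further down) needs, since a relocation slot lives inside a host Code object rather than behind a tagged anchor slot. A sketch under that assumption, with SlotTypeForRMode standing in for whatever maps a RelocInfo mode to one of the new SlotType values (both the helper name and rinfo->host() are assumptions, the definition is not in this hunk):

    void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Code* target) {
      Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
      if (target_page->IsEvacuationCandidate() &&
          !ShouldSkipEvacuationSlotRecording(rinfo->host())) {
        if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
                                target_page->slots_buffer_address(),
                                SlotTypeForRMode(rinfo->rmode()),  // assumed helper
                                rinfo->pc(),
                                SlotsBuffer::FAIL_ON_OVERFLOW)) {
          EvictEvacuationCandidate(target_page);
        }
      }
    }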
| 481 |
458 INLINE(static bool IsOnEvacuationCandidate(Object* obj)) { | 482 INLINE(static bool IsOnEvacuationCandidate(Object* obj)) { |
459 return Page::FromAddress(reinterpret_cast<Address>(obj))-> | 483 return Page::FromAddress(reinterpret_cast<Address>(obj))-> |
460 IsEvacuationCandidate(); | 484 IsEvacuationCandidate(); |
461 } | 485 } |
462 | 486 |
463 INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object)) { | 487 void EvictEvacuationCandidate(Page* page) { |
464 Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object)); | 488 if (FLAG_trace_fragmentation) { |
465 if (object_page->IsEvacuationCandidate() && | 489 PrintF("Page %p is too popular. Disabling evacuation.\n", |
466 !ShouldSkipEvacuationSlotRecording(anchor_slot)) { | 490 reinterpret_cast<void*>(page)); |
467 if (!SlotsBuffer::AddTo(&slots_buffer_allocator_, | 491 } |
468 object_page->slots_buffer_address(), | |
469 slot, | |
470 SlotsBuffer::FAIL_ON_OVERFLOW)) { | |
471 if (FLAG_trace_fragmentation) { | |
472 PrintF("Page %p is too popular. Disabling evacuation.\n", | |
473 reinterpret_cast<void*>(object_page)); | |
474 } | |
475 // TODO(gc) If all evacuation candidates are too popular we | |
476 // should stop slots recording entirely. | |
477 object_page->ClearEvacuationCandidate(); | |
478 | 492 |
479 // We were not collecting slots on this page that point | 493 // TODO(gc) If all evacuation candidates are too popular we |
480 // to other evacuation candidates thus we have to | 494 // should stop slots recording entirely. |
481 // rescan the page after evacuation to discover and update all | 495 page->ClearEvacuationCandidate(); |
482 // pointers to evacuated objects. | 496 |
483 if (object_page->owner()->identity() == OLD_DATA_SPACE) { | 497 // We were not collecting slots on this page that point |
484 evacuation_candidates_.RemoveElement(object_page); | 498 // to other evacuation candidates thus we have to |
485 } else { | 499 // rescan the page after evacuation to discover and update all |
486 object_page->SetFlag(Page::RESCAN_ON_EVACUATION); | 500 // pointers to evacuated objects. |
487 } | 501 if (page->owner()->identity() == OLD_DATA_SPACE) { |
488 } | 502 evacuation_candidates_.RemoveElement(page); |
| 503 } else { |
| 504 page->SetFlag(Page::RESCAN_ON_EVACUATION); |
489 } | 505 } |
490 } | 506 } |
491 | 507 |
| 508 void RecordRelocSlot(RelocInfo* rinfo, Code* target); |
| 509 void RecordCodeEntrySlot(Address slot, Code* target); |
| 510 |
| 511 INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object)); |
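Only the declaration of RecordSlot remains in the header; its body presumably moves into an -inl.h or .cc file and is the old inline code above with the overflow handling factored out into EvictEvacuationCandidate, roughly:

    void MarkCompactCollector::RecordSlot(Object** anchor_slot,
                                          Object** slot,
                                          Object* object) {
      Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
      if (object_page->IsEvacuationCandidate() &&
          !ShouldSkipEvacuationSlotRecording(anchor_slot)) {
        if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
                                object_page->slots_buffer_address(),
                                slot,
                                SlotsBuffer::FAIL_ON_OVERFLOW)) {
          // The page is too popular to track individually recorded slots;
          // fall back to rescanning it after evacuation.
          EvictEvacuationCandidate(object_page);
        }
      }
    }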
| 512 |
492 void MigrateObject(Address dst, | 513 void MigrateObject(Address dst, |
493 Address src, | 514 Address src, |
494 int size, | 515 int size, |
495 AllocationSpace to_old_space); | 516 AllocationSpace to_old_space); |
496 | 517 |
497 bool TryPromoteObject(HeapObject* object, int object_size); | 518 bool TryPromoteObject(HeapObject* object, int object_size); |
498 | 519 |
499 inline Object* encountered_weak_maps() { return encountered_weak_maps_; } | 520 inline Object* encountered_weak_maps() { return encountered_weak_maps_; } |
500 inline void set_encountered_weak_maps(Object* weak_map) { | 521 inline void set_encountered_weak_maps(Object* weak_map) { |
501 encountered_weak_maps_ = weak_map; | 522 encountered_weak_maps_ = weak_map; |
(...skipping 216 matching lines...)
718 | 739 |
719 List<Page*> evacuation_candidates_; | 740 List<Page*> evacuation_candidates_; |
720 | 741 |
721 friend class Heap; | 742 friend class Heap; |
722 }; | 743 }; |
723 | 744 |
724 | 745 |
725 } } // namespace v8::internal | 746 } } // namespace v8::internal |
726 | 747 |
727 #endif // V8_MARK_COMPACT_H_ | 748 #endif // V8_MARK_COMPACT_H_ |