Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 238 matching lines...) | |
| 249 int bottom_; | 249 int bottom_; |
| 250 int mask_; | 250 int mask_; |
| 251 bool overflowed_; | 251 bool overflowed_; |
| 252 | 252 |
| 253 DISALLOW_COPY_AND_ASSIGN(MarkingDeque); | 253 DISALLOW_COPY_AND_ASSIGN(MarkingDeque); |
| 254 }; | 254 }; |
| 255 | 255 |
| 256 | 256 |
| 257 class SlotsBufferAllocator { | 257 class SlotsBufferAllocator { |
| 258 public: | 258 public: |
| 259 SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer); | 259 SlotsBuffer* AllocateBuffer(Heap* heap, SlotsBuffer* next_buffer); |
| 260 void DeallocateBuffer(SlotsBuffer* buffer); | 260 void DeallocateBuffer(SlotsBuffer* buffer); |
| 261 | |
| 262 void DeallocateChain(SlotsBuffer** buffer_address); | 261 void DeallocateChain(SlotsBuffer** buffer_address); |
| 263 }; | 262 }; |
| 264 | 263 |
| 265 | 264 |
| 266 // SlotsBuffer records a sequence of slots that has to be updated | 265 // SlotsBuffer records a sequence of slots that has to be updated |
| 267 // after live objects were relocated from evacuation candidates. | 266 // after live objects were relocated from evacuation candidates. |
| 268 // All slots are either untyped or typed: | 267 // All slots are either untyped or typed: |
| 269 // - Untyped slots are expected to contain a tagged object pointer. | 268 // - Untyped slots are expected to contain a tagged object pointer. |
| 270 // They are recorded by an address. | 269 // They are recorded by an address. |
| 271 // - Typed slots are expected to contain an encoded pointer to a heap | 270 // - Typed slots are expected to contain an encoded pointer to a heap |
| 272 // object where the way of encoding depends on the type of the slot. | 271 // object where the way of encoding depends on the type of the slot. |
| 273 // They are recorded as a pair (SlotType, slot address). | 272 // They are recorded as a pair (SlotType, slot address). |
| 274 // We assume that the zero page is never mapped; this allows us to | 273 // We assume that the zero page is never mapped; this allows us to |
| 275 // distinguish untyped slots from typed slots during iteration by a simple | 274 // distinguish untyped slots from typed slots during iteration by a simple |
| 276 // comparison: if an element of the slots buffer is less than | 275 // comparison: if an element of the slots buffer is less than |
| 277 // NUMBER_OF_SLOT_TYPES, then it is the first element of a typed slot's pair. | 276 // NUMBER_OF_SLOT_TYPES, then it is the first element of a typed slot's pair. |
| 278 class SlotsBuffer { | 277 class SlotsBuffer { |
| 279 public: | 278 public: |
| 280 typedef Object** ObjectSlot; | 279 typedef Object** ObjectSlot; |
| 281 | 280 |
| 282 explicit SlotsBuffer(SlotsBuffer* next_buffer) | 281 SlotsBuffer(Heap* heap, SlotsBuffer* next_buffer) |
| 283 : idx_(0), chain_length_(1), next_(next_buffer) { | 282 : idx_(0), chain_length_(1), heap_(heap), next_(next_buffer) { |
| 284 if (next_ != NULL) { | 283 if (next_ != NULL) { |
| 285 chain_length_ = next_->chain_length_ + 1; | 284 chain_length_ = next_->chain_length_ + 1; |
| 286 } | 285 } |
| 287 } | 286 } |
| 288 | 287 |
| 289 ~SlotsBuffer() { | 288 ~SlotsBuffer() { |
| 290 } | 289 } |
| 291 | 290 |
| 292 void Add(ObjectSlot slot) { | 291 void Add(ObjectSlot slot) { |
| 293 ASSERT(0 <= idx_ && idx_ < kNumberOfElements); | 292 ASSERT(0 <= idx_ && idx_ < kNumberOfElements); |
| (...skipping 23 matching lines...) | |
| 317 case DEBUG_TARGET_SLOT: | 316 case DEBUG_TARGET_SLOT: |
| 318 return "DEBUG_TARGET_SLOT"; | 317 return "DEBUG_TARGET_SLOT"; |
| 319 case JS_RETURN_SLOT: | 318 case JS_RETURN_SLOT: |
| 320 return "JS_RETURN_SLOT"; | 319 return "JS_RETURN_SLOT"; |
| 321 case NUMBER_OF_SLOT_TYPES: | 320 case NUMBER_OF_SLOT_TYPES: |
| 322 return "NUMBER_OF_SLOT_TYPES"; | 321 return "NUMBER_OF_SLOT_TYPES"; |
| 323 } | 322 } |
| 324 return "UNKNOWN SlotType"; | 323 return "UNKNOWN SlotType"; |
| 325 } | 324 } |
| 326 | 325 |
| 327 void UpdateSlots(Heap* heap); | 326 void UpdateSlots(); |
| 328 | 327 |
Michael Starzinger
2014/03/06 10:47:25
nit: These two should be made protected IMHO.
| 329 void UpdateSlotsWithFilter(Heap* heap); | 328 void UpdateSlotsWithFilter(); |
| 330 | 329 |
| 331 SlotsBuffer* next() { return next_; } | 330 SlotsBuffer* next() { return next_; } |
| 332 | 331 |
| 333 static int SizeOfChain(SlotsBuffer* buffer) { | 332 static int SizeOfChain(SlotsBuffer* buffer) { |
| 334 if (buffer == NULL) return 0; | 333 if (buffer == NULL) return 0; |
| 335 return static_cast<int>(buffer->idx_ + | 334 return static_cast<int>(buffer->idx_ + |
| 336 (buffer->chain_length_ - 1) * kNumberOfElements); | 335 (buffer->chain_length_ - 1) * kNumberOfElements); |
| 337 } | 336 } |
| 338 | 337 |
| 339 inline bool IsFull() { | 338 inline bool IsFull() { |
| 340 return idx_ == kNumberOfElements; | 339 return idx_ == kNumberOfElements; |
| 341 } | 340 } |
| 342 | 341 |
| 343 inline bool HasSpaceForTypedSlot() { | 342 inline bool HasSpaceForTypedSlot() { |
| 344 return idx_ < kNumberOfElements - 1; | 343 return idx_ < kNumberOfElements - 1; |
| 345 } | 344 } |
| 346 | 345 |
| 347 static void UpdateSlotsRecordedIn(Heap* heap, | 346 inline void UpdateSlotsRecordedIn(bool code_slots_filtering_required) { |
Michael Starzinger
2014/03/06 10:47:25
nit: The naming is a little bit weird now, because
| 348 SlotsBuffer* buffer, | 347 SlotsBuffer* buffer = this; |
| 349 bool code_slots_filtering_required) { | |
| 350 while (buffer != NULL) { | 348 while (buffer != NULL) { |
| 351 if (code_slots_filtering_required) { | 349 if (code_slots_filtering_required) { |
| 352 buffer->UpdateSlotsWithFilter(heap); | 350 buffer->UpdateSlotsWithFilter(); |
| 353 } else { | 351 } else { |
| 354 buffer->UpdateSlots(heap); | 352 buffer->UpdateSlots(); |
| 355 } | 353 } |
| 356 buffer = buffer->next(); | 354 buffer = buffer->next(); |
| 357 } | 355 } |
| 358 } | 356 } |
| 359 | 357 |
| 360 enum AdditionMode { | 358 enum AdditionMode { |
| 361 FAIL_ON_OVERFLOW, | 359 FAIL_ON_OVERFLOW, |
| 362 IGNORE_OVERFLOW | 360 IGNORE_OVERFLOW |
| 363 }; | 361 }; |
| 364 | 362 |
| 365 static bool ChainLengthThresholdReached(SlotsBuffer* buffer) { | 363 static bool ChainLengthThresholdReached(SlotsBuffer* buffer) { |
| 366 return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold; | 364 return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold; |
| 367 } | 365 } |
| 368 | 366 |
| 369 INLINE(static bool AddTo(SlotsBufferAllocator* allocator, | 367 INLINE(static bool AddTo(Heap* heap, |
| 368 SlotsBufferAllocator* allocator, | |
| 370 SlotsBuffer** buffer_address, | 369 SlotsBuffer** buffer_address, |
| 371 ObjectSlot slot, | 370 ObjectSlot slot, |
| 372 AdditionMode mode)) { | 371 AdditionMode mode)); |
| 373 SlotsBuffer* buffer = *buffer_address; | 372 |
| 374 if (buffer == NULL || buffer->IsFull()) { | 373 INLINE(static bool AddTo(Heap* heap, |
| 375 if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) { | 374 SlotsBufferAllocator* allocator, |
| 376 allocator->DeallocateChain(buffer_address); | 375 SlotsBuffer** buffer_address, |
| 377 return false; | 376 SlotType type, |
| 378 } | 377 Address addr, |
| 379 buffer = allocator->AllocateBuffer(buffer); | 378 AdditionMode mode)); |
| 380 *buffer_address = buffer; | 379 |
| 381 } | 380 Heap* heap() const { return heap_; } |
| 382 buffer->Add(slot); | |
| 383 return true; | |
| 384 } | |
| 385 | 381 |
| 386 static bool IsTypedSlot(ObjectSlot slot); | 382 static bool IsTypedSlot(ObjectSlot slot); |
| 387 | 383 |
| 388 static bool AddTo(SlotsBufferAllocator* allocator, | 384 static const int kNumberOfElements = 1020; |
ulan
2014/03/05 15:40:18
Why is this change to 1020 necessary?

Hannes Payer (out of office)
2014/03/05 20:39:29
The slots buffer should fit on a page. After addin
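To make the reviewer's size argument concrete, here is a back-of-the-envelope check. It is only a sketch: it assumes no struct padding, uses V8's `kPointerSize` for the machine word size, and the exact "one buffer = 1024 words" target is inferred from the discussion above rather than stated in the CL.

```cpp
// Sketch only: why kNumberOfElements drops from 1021 to 1020 once the
// Heap* member is added -- the buffer stays at exactly 1024 words.
//
//   old: idx_ + chain_length_ + next_          =    3 words
//        slots_[1021]                          = 1021 words  -> 1024 words
//   new: idx_ + chain_length_ + heap_ + next_  =    4 words
//        slots_[1020]                          = 1020 words  -> 1024 words
//
// That is 4 KB on 32-bit and 8 KB on 64-bit builds. A hypothetical guard
// (not part of this CL), placed after the class definition, could pin the
// invariant down:
STATIC_ASSERT(sizeof(SlotsBuffer) == 1024 * kPointerSize);
```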
| 389 SlotsBuffer** buffer_address, | |
| 390 SlotType type, | |
| 391 Address addr, | |
| 392 AdditionMode mode); | |
| 393 | |
| 394 static const int kNumberOfElements = 1021; | |
| 395 | 385 |
| 396 private: | 386 private: |
| 397 static const int kChainLengthThreshold = 15; | 387 static const int kChainLengthThreshold = 15; |
| 398 | 388 |
| 399 intptr_t idx_; | 389 intptr_t idx_; |
| 400 intptr_t chain_length_; | 390 intptr_t chain_length_; |
| 391 Heap* heap_; | |
| 401 SlotsBuffer* next_; | 392 SlotsBuffer* next_; |
| 402 ObjectSlot slots_[kNumberOfElements]; | 393 ObjectSlot slots_[kNumberOfElements]; |
| 403 }; | 394 }; |
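As a reading aid for the class comment above, the following is a minimal sketch of how a walker can tell typed entries from untyped ones. It is not the CL's `UpdateSlots` implementation, and `ProcessTypedSlot`/`ProcessUntypedSlot` are hypothetical helpers introduced only for illustration.

```cpp
// Walks a raw slots array, relying on the zero-page assumption: a value
// smaller than NUMBER_OF_SLOT_TYPES cannot be a mapped address, so it must
// be the SlotType half of a (SlotType, address) pair.
void WalkSlots(SlotsBuffer::ObjectSlot* slots, intptr_t count) {
  for (intptr_t i = 0; i < count; i++) {
    uintptr_t element = reinterpret_cast<uintptr_t>(slots[i]);
    if (element < static_cast<uintptr_t>(SlotsBuffer::NUMBER_OF_SLOT_TYPES)) {
      // Typed slot: this element encodes the type, the next element holds
      // the address the encoded pointer lives at.
      SlotsBuffer::SlotType type =
          static_cast<SlotsBuffer::SlotType>(element);
      Address addr = reinterpret_cast<Address>(slots[++i]);
      ProcessTypedSlot(type, addr);    // hypothetical helper
    } else {
      // Untyped slot: the element itself is an Object** whose tagged
      // pointer may need updating after evacuation.
      ProcessUntypedSlot(slots[i]);    // hypothetical helper
    }
  }
}
```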
| 404 | 395 |
| 405 | 396 |
| 406 // CodeFlusher collects candidates for code flushing during marking and | 397 // CodeFlusher collects candidates for code flushing during marking and |
| 407 // processes those candidates after marking has completed in order to | 398 // processes those candidates after marking has completed in order to |
| 408 // reset those functions referencing code objects that would otherwise | 399 // reset those functions referencing code objects that would otherwise |
| 409 // be unreachable. Code objects can be referenced in three ways: | 400 // be unreachable. Code objects can be referenced in three ways: |
| 410 // - SharedFunctionInfo references unoptimized code. | 401 // - SharedFunctionInfo references unoptimized code. |
| (...skipping 329 matching lines...) | |
| 740 } | 731 } |
| 741 | 732 |
| 742 // Mark the global table which maps weak objects to dependent code without | 733 // Mark the global table which maps weak objects to dependent code without |
| 743 // marking its contents. | 734 // marking its contents. |
| 744 void MarkWeakObjectToCodeTable(); | 735 void MarkWeakObjectToCodeTable(); |
| 745 | 736 |
| 746 // Special case for processing weak references in a full collection. We need | 737 // Special case for processing weak references in a full collection. We need |
| 747 // to artificially keep AllocationSites alive for a time. | 738 // to artificially keep AllocationSites alive for a time. |
| 748 void MarkAllocationSite(AllocationSite* site); | 739 void MarkAllocationSite(AllocationSite* site); |
| 749 | 740 |
| 741 void IncrementEvacuationScope() { | |
| 742 evacuation_scope_++; | |
| 743 ASSERT(evacuation_scope_ <= 1); | |
Michael Starzinger
2014/03/06 10:47:25
These two ASSERTS combined basically enforce ...
| 744 } | |
| 745 | |
| 746 void DecrementEvacuationScope() { | |
| 747 evacuation_scope_--; | |
| 748 ASSERT(evacuation_scope_ >= 0); | |
| 749 } | |
| 750 | |
| 751 bool IsInsideEvacuationScope() { | |
| 752 return evacuation_scope_ > 0; | |
| 753 } | |
| 754 | |
| 750 private: | 755 private: |
| 751 class SweeperTask; | 756 class SweeperTask; |
| 752 | 757 |
| 753 explicit MarkCompactCollector(Heap* heap); | 758 explicit MarkCompactCollector(Heap* heap); |
| 754 ~MarkCompactCollector(); | 759 ~MarkCompactCollector(); |
| 755 | 760 |
| 756 bool MarkInvalidatedCode(); | 761 bool MarkInvalidatedCode(); |
| 757 bool WillBeDeoptimized(Code* code); | 762 bool WillBeDeoptimized(Code* code); |
| 758 void RemoveDeadInvalidatedCode(); | 763 void RemoveDeadInvalidatedCode(); |
| 759 void ProcessInvalidatedCode(ObjectVisitor* visitor); | 764 void ProcessInvalidatedCode(ObjectVisitor* visitor); |
| (...skipping 207 matching lines...) | |
| 967 CodeFlusher* code_flusher_; | 972 CodeFlusher* code_flusher_; |
| 968 Object* encountered_weak_collections_; | 973 Object* encountered_weak_collections_; |
| 969 bool have_code_to_deoptimize_; | 974 bool have_code_to_deoptimize_; |
| 970 | 975 |
| 971 List<Page*> evacuation_candidates_; | 976 List<Page*> evacuation_candidates_; |
| 972 List<Code*> invalidated_code_; | 977 List<Code*> invalidated_code_; |
| 973 | 978 |
| 974 SmartPointer<FreeList> free_list_old_data_space_; | 979 SmartPointer<FreeList> free_list_old_data_space_; |
| 975 SmartPointer<FreeList> free_list_old_pointer_space_; | 980 SmartPointer<FreeList> free_list_old_pointer_space_; |
| 976 | 981 |
| 982 int evacuation_scope_; | |
| 983 | |
| 977 friend class Heap; | 984 friend class Heap; |
| 978 }; | 985 }; |
| 979 | 986 |
| 980 | 987 |
| 981 class MarkBitCellIterator BASE_EMBEDDED { | 988 class MarkBitCellIterator BASE_EMBEDDED { |
| 982 public: | 989 public: |
| 983 explicit MarkBitCellIterator(MemoryChunk* chunk) | 990 explicit MarkBitCellIterator(MemoryChunk* chunk) |
| 984 : chunk_(chunk) { | 991 : chunk_(chunk) { |
| 985 last_cell_index_ = Bitmap::IndexToCell( | 992 last_cell_index_ = Bitmap::IndexToCell( |
| 986 Bitmap::CellAlignIndex( | 993 Bitmap::CellAlignIndex( |
| (...skipping 44 matching lines...) | |
| 1031 | 1038 |
| 1032 ~SequentialSweepingScope() { | 1039 ~SequentialSweepingScope() { |
| 1033 collector_->set_sequential_sweeping(false); | 1040 collector_->set_sequential_sweeping(false); |
| 1034 } | 1041 } |
| 1035 | 1042 |
| 1036 private: | 1043 private: |
| 1037 MarkCompactCollector* collector_; | 1044 MarkCompactCollector* collector_; |
| 1038 }; | 1045 }; |
| 1039 | 1046 |
| 1040 | 1047 |
| 1048 class EvacuationScope V8_FINAL { | |
Michael Starzinger
2014/03/06 10:47:25
High-level idea: This scope pretty much coincides
| 1049 public: | |
| 1050 explicit EvacuationScope(MarkCompactCollector* mark_compact) : | |
| 1051 mark_compact_(mark_compact) { | |
| 1052 mark_compact_->IncrementEvacuationScope(); | |
| 1053 } | |
| 1054 ~EvacuationScope() { | |
| 1055 mark_compact_->DecrementEvacuationScope(); | |
| 1056 } | |
| 1057 | |
| 1058 private: | |
| 1059 MarkCompactCollector* mark_compact_; | |
| 1060 }; | |
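The two ASSERTs flagged by the reviewer above keep `evacuation_scope_` in the range [0, 1], so scopes cannot nest and the counter behaves like a boolean flag. Below is a minimal usage sketch under that reading; the surrounding method and the `EvacuateNewSpaceAndCandidates()` call site are assumptions for illustration, not code from this CL.

```cpp
// Sketch of the intended RAII pattern: while an EvacuationScope is alive,
// IsInsideEvacuationScope() reports true, letting e.g. slot-recording code
// detect that evacuation is in progress.
void MarkCompactCollector::EvacuationPhaseExample() {  // hypothetical method
  ASSERT(!IsInsideEvacuationScope());
  {
    EvacuationScope evacuation_scope(this);
    EvacuateNewSpaceAndCandidates();  // assumed call site for illustration
    // Opening a second EvacuationScope here would trip the
    // ASSERT(evacuation_scope_ <= 1) in IncrementEvacuationScope().
  }
  ASSERT(!IsInsideEvacuationScope());
}
```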
| 1061 | |
| 1062 | |
| 1041 const char* AllocationSpaceName(AllocationSpace space); | 1063 const char* AllocationSpaceName(AllocationSpace space); |
| 1042 | 1064 |
| 1043 } } // namespace v8::internal | 1065 } } // namespace v8::internal |
| 1044 | 1066 |
| 1045 #endif // V8_MARK_COMPACT_H_ | 1067 #endif // V8_MARK_COMPACT_H_ |