Chromium Code Reviews

Unified Diff: src/heap/heap.h

Issue 2312643003: [heap] Move PromotionQueue implementation out of heap.h (Closed)
Patch Set: Created 4 years, 3 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef V8_HEAP_HEAP_H_
 #define V8_HEAP_HEAP_H_

 #include <cmath>
 #include <map>

(...skipping 313 matching lines...)

 class HeapStats;
 class HistogramTimer;
 class Isolate;
 class MemoryReducer;
 class ObjectStats;
 class Scavenger;
 class ScavengeJob;
 class StoreBuffer;
 class WeakObjectRetainer;

+typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
+
 enum PromotionMode { PROMOTE_MARKED, DEFAULT_PROMOTION };

-typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
+enum ArrayStorageAllocationMode {
+  DONT_INITIALIZE_ARRAY_ELEMENTS,
+  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
+};

-// A queue of objects promoted during scavenge. Each object is accompanied
-// by it's size to avoid dereferencing a map pointer for scanning.
-// The last page in to-space is used for the promotion queue. On conflict
-// during scavenge, the promotion queue is allocated externally and all
-// entries are copied to the external queue.
+enum class ClearRecordedSlots { kYes, kNo };
+
+enum class ClearBlackArea { kYes, kNo };
+
+// A queue of objects promoted during scavenge. Each object is accompanied by
+// its size to avoid dereferencing a map pointer for scanning. The last page in
+// to-space is used for the promotion queue. On conflict during scavenge, the
+// promotion queue is allocated externally and all entries are copied to the
+// external queue.
 class PromotionQueue {
  public:
   explicit PromotionQueue(Heap* heap)
-      : front_(NULL),
-        rear_(NULL),
-        limit_(NULL),
-        emergency_stack_(0),
+      : front_(nullptr),
+        rear_(nullptr),
+        limit_(nullptr),
+        emergency_stack_(nullptr),
         heap_(heap) {}

   void Initialize();
+  void Destroy();

-  void Destroy() {
-    DCHECK(is_empty());
-    delete emergency_stack_;
-    emergency_stack_ = NULL;
-  }
+  inline void SetNewLimit(Address limit);
+  inline bool IsBelowPromotionQueue(Address to_space_top);

-  Page* GetHeadPage() {
-    return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
-  }
-
-  void SetNewLimit(Address limit) {
-    // If we are already using an emergency stack, we can ignore it.
-    if (emergency_stack_) return;
-
-    // If the limit is not on the same page, we can ignore it.
-    if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return;
-
-    limit_ = reinterpret_cast<struct Entry*>(limit);
-
-    if (limit_ <= rear_) {
-      return;
-    }
-
-    RelocateQueueHead();
-  }
-
-  bool IsBelowPromotionQueue(Address to_space_top) {
-    // If an emergency stack is used, the to-space address cannot interfere
-    // with the promotion queue.
-    if (emergency_stack_) return true;
-
-    // If the given to-space top pointer and the head of the promotion queue
-    // are not on the same page, then the to-space objects are below the
-    // promotion queue.
-    if (GetHeadPage() != Page::FromAddress(to_space_top)) {
-      return true;
-    }
-    // If the to space top pointer is smaller or equal than the promotion
-    // queue head, then the to-space objects are below the promotion queue.
-    return reinterpret_cast<struct Entry*>(to_space_top) <= rear_;
-  }
+  inline void insert(HeapObject* target, int32_t size, bool was_marked_black);
+  inline void remove(HeapObject** target, int32_t* size,
+                     bool* was_marked_black);

   bool is_empty() {
     return (front_ == rear_) &&
-           (emergency_stack_ == NULL || emergency_stack_->length() == 0);
-  }
-
-  inline void insert(HeapObject* target, int32_t size, bool was_marked_black);
-
-  void remove(HeapObject** target, int32_t* size, bool* was_marked_black) {
-    DCHECK(!is_empty());
-    if (front_ == rear_) {
-      Entry e = emergency_stack_->RemoveLast();
-      *target = e.obj_;
-      *size = e.size_;
-      *was_marked_black = e.was_marked_black_;
-      return;
-    }
-
-    struct Entry* entry = reinterpret_cast<struct Entry*>(--front_);
-    *target = entry->obj_;
-    *size = entry->size_;
-    *was_marked_black = entry->was_marked_black_;
-
-    // Assert no underflow.
-    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
-                                reinterpret_cast<Address>(front_));
+           (emergency_stack_ == nullptr || emergency_stack_->length() == 0);
   }

  private:
   struct Entry {
     Entry(HeapObject* obj, int32_t size, bool was_marked_black)
         : obj_(obj), size_(size), was_marked_black_(was_marked_black) {}

     HeapObject* obj_;
     int32_t size_ : 31;
     bool was_marked_black_ : 1;
   };

+  inline Page* GetHeadPage();
+
   void RelocateQueueHead();

   // The front of the queue is higher in the memory page chain than the rear.
   struct Entry* front_;
   struct Entry* rear_;
   struct Entry* limit_;

   List<Entry>* emergency_stack_;
-
   Heap* heap_;

   DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
 };
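Note on the deleted bodies: per the CL title they are moved out of heap.h rather than dropped, and src/heap/heap.cc is the next file in this change. Which file receives each definition is not visible on this page, so the following is only a sketch reconstructed from the code removed above (with the CL's NULL -> nullptr cleanup applied); treat the destination file as an assumption.

// Sketch only: out-of-line definitions reconstructed from the removed bodies.
// The destination (src/heap/heap.cc vs. an -inl.h header) is assumed, not
// shown on this page.
void PromotionQueue::Destroy() {
  DCHECK(is_empty());
  delete emergency_stack_;
  emergency_stack_ = nullptr;
}

Page* PromotionQueue::GetHeadPage() {
  return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
}

void PromotionQueue::SetNewLimit(Address limit) {
  // Ignore the limit when an emergency stack is already in use.
  if (emergency_stack_) return;
  // Ignore limits that are not on the queue's head page.
  if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return;
  limit_ = reinterpret_cast<struct Entry*>(limit);
  if (limit_ <= rear_) return;
  RelocateQueueHead();
}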

-
-enum ArrayStorageAllocationMode {
     Michael Lippautz 2016/09/05 16:10:11: Moved up
-  DONT_INITIALIZE_ARRAY_ELEMENTS,
-  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
-};
-
-enum class ClearRecordedSlots { kYes, kNo };
-
-enum class ClearBlackArea { kYes, kNo };
-
 class Heap {
  public:
   // Declare all the root indices. This defines the root list order.
   enum RootListIndex {
 #define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
     STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
 #undef ROOT_INDEX_DECLARATION

 #define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
     INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)

(...skipping 2262 matching lines...)

   friend class LargeObjectSpace;
   friend class NewSpace;
   friend class PagedSpace;
   DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
 };

 }  // namespace internal
 }  // namespace v8

 #endif  // V8_HEAP_HEAP_H_
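As an aside on the Entry layout kept above: packing the 31-bit size next to the 1-bit was_marked_black flag lets a queue entry carry the object's size without dereferencing the object's map during scanning, which is what the class comment promises. A standalone, compilable illustration of that packing (hypothetical names, not V8 code; how adjacent bit-fields are laid out is ABI-dependent):

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for PromotionQueue::Entry: a pointer plus a 31-bit
// size and a 1-bit flag, which share a single 32-bit unit on common ABIs.
struct PackedEntry {
  void* obj;
  int32_t size : 31;
  bool was_marked_black : 1;
};

int main() {
  PackedEntry e{nullptr, 128, true};
  assert(e.size == 128 && e.was_marked_black);
  // Pointer plus at most two 32-bit units, however the ABI packs the
  // bit-fields.
  static_assert(sizeof(PackedEntry) <= sizeof(void*) + 2 * sizeof(int32_t),
                "entry stays small");
  return 0;
}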