Index: src/heap/spaces.h |
diff --git a/src/heap/spaces.h b/src/heap/spaces.h |
index a8102cabc7c5093b97bc59c45298c22fcb06ac72..9f51338b22d75e7cfc0d4e10b7e10c29695b2c25 100644 |
--- a/src/heap/spaces.h |
+++ b/src/heap/spaces.h |
@@ -12,6 +12,7 @@ |
#include "src/base/platform/mutex.h" |
#include "src/flags.h" |
#include "src/hashmap.h" |
+#include "src/heap/store-buffer.h" |
#include "src/list.h" |
#include "src/objects.h" |
#include "src/utils.h" |
@@ -396,7 +397,7 @@ class MemoryChunk { |
+ 2 * kPointerSize // base::VirtualMemory reservation_ |
+ kPointerSize // Address owner_ |
+ kPointerSize // Heap* heap_ |
- + kIntSize; // int store_buffer_counter_ |
+ + kIntSize; // int progress_bar_ |
static const size_t kSlotsBufferOffset = |
kLiveBytesOffset + kIntSize; // int live_byte_count_ |
@@ -408,7 +409,6 @@ class MemoryChunk { |
static const size_t kMinHeaderSize = |
kWriteBarrierCounterOffset + |
kIntptrSize // intptr_t write_barrier_counter_ |
- + kIntSize // int progress_bar_ |
+ kPointerSize // AtomicValue high_water_mark_ |
+ kPointerSize // base::Mutex* mutex_ |
+ kPointerSize // base::AtomicWord parallel_sweeping_ |
@@ -420,7 +420,7 @@ class MemoryChunk { |
// We add some more space to the computed header size to amount for missing |
// alignment requirements in our computation. |
// Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. |
- static const size_t kHeaderSize = kMinHeaderSize + kIntSize; |
+ static const size_t kHeaderSize = kMinHeaderSize; |
static const int kBodyOffset = |
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); |
@@ -519,11 +519,6 @@ class MemoryChunk { |
} |
inline void set_scan_on_scavenge(bool scan); |
- int store_buffer_counter() { return store_buffer_counter_; } |
- void set_store_buffer_counter(int counter) { |
- store_buffer_counter_ = counter; |
- } |
- |
bool Contains(Address addr) { |
return addr >= area_start() && addr < area_end(); |
} |
@@ -751,17 +746,14 @@ class MemoryChunk { |
// in a fixed array. |
Address owner_; |
Heap* heap_; |
- // Used by the store buffer to keep track of which pages to mark scan-on- |
- // scavenge. |
- int store_buffer_counter_; |
+ // Used by the incremental marker to keep track of the scanning progress in |
+ // large objects that have a progress bar and are scanned in increments. |
+ int progress_bar_; |
// Count of bytes marked black on page. |
int live_byte_count_; |
SlotsBuffer* slots_buffer_; |
SkipList* skip_list_; |
intptr_t write_barrier_counter_; |
- // Used by the incremental marker to keep track of the scanning progress in |
- // large objects that have a progress bar and are scanned in increments. |
- int progress_bar_; |
// Assuming the initial allocation on a page is sequential, |
// count highest number of bytes ever allocated on the page. |
AtomicValue<intptr_t> high_water_mark_; |
@@ -3005,6 +2997,8 @@ class CompactionSpaceCollection : public Malloced { |
explicit CompactionSpaceCollection(Heap* heap) |
: old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE), |
code_space_(heap, CODE_SPACE, Executability::EXECUTABLE), |
+ local_pretenuring_feedback_(HashMap::PointersMatch, |
+ kInitialLocalPretenuringFeedbackCapacity), |
duration_(0.0), |
bytes_compacted_(0) {} |
@@ -3028,10 +3022,16 @@ class CompactionSpaceCollection : public Malloced { |
double duration() const { return duration_; } |
intptr_t bytes_compacted() const { return bytes_compacted_; } |
+ HashMap* local_pretenuring_feedback() { return &local_pretenuring_feedback_; } |
+ LocalStoreBuffer* local_store_buffer() { return &local_store_buffer_; } |
private: |
+ static const int kInitialLocalPretenuringFeedbackCapacity = 256; |
[Review comment — Michael Lippautz, 2016/01/14 19:51:55, on `kInitialLocalPretenuringFeedbackCapacity = 256` above: "= the length of the previously used global scratch[pad]" — i.e. the initial capacity of the per-compaction-space feedback map matches that of the global pretenuring-feedback table it replaces. Final word truncated in extraction; reconstruction to be confirmed against the original review thread. This annotation is review metadata, not part of the patch.]
+ |
CompactionSpace old_space_; |
CompactionSpace code_space_; |
+ HashMap local_pretenuring_feedback_; |
+ LocalStoreBuffer local_store_buffer_; |
// Book keeping. |
double duration_; |