Chromium Code Reviews

Index: src/heap/spaces.h
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index b11f926fde3cf373b1144b0dc67ebc5f859c0a02..943f04d182bea57b839598bee5bd583a8411e8ff 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -6,6 +6,7 @@
 #define V8_HEAP_SPACES_H_
 #include "src/allocation.h"
+#include "src/atomic-utils.h"
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/platform/mutex.h"
@@ -560,14 +561,14 @@ class MemoryChunk {
   static const size_t kMinHeaderSize =
       kWriteBarrierCounterOffset +
-      kIntptrSize      // intptr_t write_barrier_counter_
-      + kIntSize       // int progress_bar_
-      + kIntSize       // int high_water_mark_
-      + kPointerSize   // base::Mutex* mutex_
-      + kPointerSize   // base::AtomicWord parallel_sweeping_
-      + 5 * kIntSize   // int free-list statistics
-      + kPointerSize   // base::AtomicWord next_chunk_
-      + kPointerSize;  // base::AtomicWord prev_chunk_
+      kIntptrSize         // intptr_t write_barrier_counter_
+      + kIntSize          // int progress_bar_
+      + kPointerSize      // AtomicValue high_water_mark_
+      + kPointerSize      // base::Mutex* mutex_
+      + kPointerSize      // base::AtomicWord parallel_sweeping_
+      + 5 * kPointerSize  // AtomicNumber free-list statistics
+      + kPointerSize      // base::AtomicWord next_chunk_
+      + kPointerSize;     // base::AtomicWord prev_chunk_
   // We add some more space to the computed header size to account for missing
   // alignment requirements in our computation.
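The size arithmetic above changes because every converted field now occupies a
pointer-sized word instead of an int: high_water_mark_ moves from kIntSize to
kPointerSize, and the five free-list counters from 5 * kIntSize to
5 * kPointerSize. A hypothetical compile-time check, not part of this CL, would
make that layout assumption explicit:

  // Hypothetical, not in the patch: the wrappers are assumed to add no
  // storage beyond their single underlying word.
  STATIC_ASSERT(sizeof(AtomicValue<intptr_t>) == kPointerSize);
  STATIC_ASSERT(sizeof(AtomicNumber<intptr_t>) == kPointerSize);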
@@ -674,21 +675,23 @@ class MemoryChunk {
   bool CommitArea(size_t requested);
   // Approximate amount of physical memory committed for this chunk.
-  size_t CommittedPhysicalMemory() { return high_water_mark_; }
+  size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
   // Should be called when memory chunk is about to be freed.
   void ReleaseAllocatedMemory();
   static inline void UpdateHighWaterMark(Address mark) {
-    if (mark == NULL) return;
+    if (mark == nullptr) return;
     // Need to subtract one from the mark because when a chunk is full the
     // top points to the next address after the chunk, which effectively
     // belongs to another chunk. See the comment to Page::FromAllocationTop.
     MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
-    int new_mark = static_cast<int>(mark - chunk->address());
-    if (new_mark > chunk->high_water_mark_) {
-      chunk->high_water_mark_ = new_mark;
-    }
+    intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
+    intptr_t old_mark = 0;
+    do {
+      old_mark = chunk->high_water_mark_.Value();
+    } while ((new_mark > old_mark) &&
+             !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
   }

 protected:
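The rewritten UpdateHighWaterMark is a lock-free maximum: take a snapshot of
the current mark and retry the compare-and-swap only while the candidate still
exceeds it. Losing a race is harmless, because the winning writer installed a
value at least as large as the snapshot we compared against. A self-contained
sketch of the same pattern, using std::atomic instead of V8's wrapper (the
names here are illustrative):

  #include <atomic>
  #include <cstdint>

  // Stand-alone version of the retry loop in UpdateHighWaterMark.
  void UpdateMax(std::atomic<intptr_t>& mark, intptr_t candidate) {
    intptr_t current = mark.load(std::memory_order_relaxed);
    // compare_exchange_weak reloads |current| on failure, so every
    // iteration re-checks whether |candidate| is still the larger value.
    while (candidate > current &&
           !mark.compare_exchange_weak(current, candidate,
                                       std::memory_order_acq_rel)) {
    }
  }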
@@ -719,17 +722,17 @@ class MemoryChunk {
   int progress_bar_;
   // Assuming the initial allocation on a page is sequential,
   // count highest number of bytes ever allocated on the page.
-  int high_water_mark_;
+  AtomicValue<intptr_t> high_water_mark_;
   base::Mutex* mutex_;
   base::AtomicWord parallel_sweeping_;
   // PagedSpace free-list statistics.
-  int available_in_small_free_list_;
-  int available_in_medium_free_list_;
-  int available_in_large_free_list_;
-  int available_in_huge_free_list_;
-  int non_available_small_blocks_;
+  AtomicNumber<intptr_t> available_in_small_free_list_;
+  AtomicNumber<intptr_t> available_in_medium_free_list_;
+  AtomicNumber<intptr_t> available_in_large_free_list_;
+  AtomicNumber<intptr_t> available_in_huge_free_list_;
+  AtomicNumber<intptr_t> non_available_small_blocks_;
   // next_chunk_ holds a pointer of type MemoryChunk
   base::AtomicWord next_chunk_;
@@ -828,21 +831,22 @@ class Page : public MemoryChunk {
   void ResetFreeListStatistics();
   int LiveBytesFromFreeList() {
-    return area_size() - non_available_small_blocks_ -
-           available_in_small_free_list_ - available_in_medium_free_list_ -
-           available_in_large_free_list_ - available_in_huge_free_list_;
+    return static_cast<int>(
+        area_size() - non_available_small_blocks() -
+        available_in_small_free_list() - available_in_medium_free_list() -
+        available_in_large_free_list() - available_in_huge_free_list());
   }

-#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
-  type name() { return name##_; }                 \
-  void set_##name(type name) { name##_ = name; }  \
-  void add_##name(type name) { name##_ += name; }
+#define FRAGMENTATION_STATS_ACCESSORS(type, name)        \
+  type name() { return name##_.Value(); }                \
+  void set_##name(type name) { name##_.SetValue(name); } \
+  void add_##name(type name) { name##_.Increment(name); }

-  FRAGMENTATION_STATS_ACCESSORS(int, non_available_small_blocks)
-  FRAGMENTATION_STATS_ACCESSORS(int, available_in_small_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(int, available_in_medium_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(int, available_in_large_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(int, available_in_huge_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)

 #undef FRAGMENTATION_STATS_ACCESSORS
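Expanding the updated macro for a single field shows what the accessors now do;
this is the literal expansion of
FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list):

  // Reads go through the wrapper's atomic load ...
  intptr_t available_in_small_free_list() {
    return available_in_small_free_list_.Value();
  }
  // ... and writes and increments through its atomic store and add.
  void set_available_in_small_free_list(intptr_t available_in_small_free_list) {
    available_in_small_free_list_.SetValue(available_in_small_free_list);
  }
  void add_available_in_small_free_list(intptr_t available_in_small_free_list) {
    available_in_small_free_list_.Increment(available_in_small_free_list);
  }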
@@ -1150,9 +1154,9 @@ class MemoryAllocator {
   // Returns an indication of whether a pointer is in a space that has
   // been allocated by this MemoryAllocator.
-  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
-    return address < lowest_ever_allocated_ ||
-           address >= highest_ever_allocated_;
+  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
+    return address < lowest_ever_allocated_.Value() ||
+           address >= highest_ever_allocated_.Value();
   }

 #ifdef DEBUG
@@ -1246,8 +1250,8 @@ class MemoryAllocator {
   // conservative, i.e. not all addresses in 'allocated' space are allocated
   // to our heap. The range is [lowest, highest[, inclusive on the low end
   // and exclusive on the high end.
-  void* lowest_ever_allocated_;
-  void* highest_ever_allocated_;
+  AtomicValue<void*> lowest_ever_allocated_;
+  AtomicValue<void*> highest_ever_allocated_;

   struct MemoryAllocationCallbackRegistration {
     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
@@ -1270,8 +1274,13 @@ class MemoryAllocator {
                                 PagedSpace* owner);

   void UpdateAllocatedSpaceLimits(void* low, void* high) {
-    lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
-    highest_ever_allocated_ = Max(highest_ever_allocated_, high);
+    void* ptr = nullptr;
+    do {
+      ptr = lowest_ever_allocated_.Value();
Hannes Payer (out of office) 2015/09/15 09:55:36
Can this be a function on AtomicValue?

Michael Lippautz 2015/09/15 11:54:53
As discussed offline: While lambdas are supported,
+    } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
+    do {
+      ptr = highest_ever_allocated_.Value();
+    } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
   }

   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
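Hannes Payer's inline comment above asks whether this bounded-update loop could
live on AtomicValue itself; Michael Lippautz's (truncated) reply touches on
lambda support. One hypothetical shape such helpers could take, with invented
names and not part of this CL:

  // Hypothetical members on AtomicValue<T>; names and signatures are
  // invented for illustration.
  void SetMaxValue(T candidate) {
    T current = Value();
    while (candidate > current && !TrySetValue(current, candidate)) {
      current = Value();
    }
  }
  void SetMinValue(T candidate) {
    T current = Value();
    while (candidate < current && !TrySetValue(current, candidate)) {
      current = Value();
    }
  }

With these, UpdateAllocatedSpaceLimits would reduce to:

  lowest_ever_allocated_.SetMinValue(low);
  highest_ever_allocated_.SetMaxValue(high);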