Index: src/heap/spaces.h
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index b11f926fde3cf373b1144b0dc67ebc5f859c0a02..edbda223616748396afc30b35a69e97a71ddb9d2 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -6,6 +6,7 @@
 #define V8_HEAP_SPACES_H_
 
 #include "src/allocation.h"
+#include "src/atomic-utils.h"
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/platform/mutex.h"
@@ -560,14 +561,14 @@ class MemoryChunk {
 
   static const size_t kMinHeaderSize =
       kWriteBarrierCounterOffset +
-      kIntptrSize         // intptr_t write_barrier_counter_
-      + kIntSize          // int progress_bar_
-      + kIntSize          // int high_water_mark_
-      + kPointerSize      // base::Mutex* mutex_
-      + kPointerSize      // base::AtomicWord parallel_sweeping_
-      + 5 * kIntSize      // int free-list statistics
-      + kPointerSize      // base::AtomicWord next_chunk_
-      + kPointerSize;     // base::AtomicWord prev_chunk_
+      kIntptrSize          // intptr_t write_barrier_counter_
+      + kIntSize           // int progress_bar_
+      + kPointerSize       // AtomicValue high_water_mark_
+      + kPointerSize       // base::Mutex* mutex_
+      + kPointerSize       // base::AtomicWord parallel_sweeping_
+      + 5 * kPointerSize   // AtomicNumber free-list statistics
+      + kPointerSize       // base::AtomicWord next_chunk_
+      + kPointerSize;      // base::AtomicWord prev_chunk_
 
   // We add some more space to the computed header size to amount for missing
   // alignment requirements in our computation.
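[Note: the header math changes because high_water_mark_ goes from int to a
pointer-sized AtomicValue, and the five free-list counters go from int to
pointer-sized AtomicNumber. On a 64-bit target (kIntSize == 4,
kPointerSize == kIntptrSize == 8) the affected terms grow from
kIntSize + 5 * kIntSize == 24 bytes to kPointerSize + 5 * kPointerSize ==
48 bytes, so kMinHeaderSize grows by 24 bytes on x64 and is unchanged on
ia32 (where kIntSize == kPointerSize == 4). This assumes the wrappers hold
exactly one word; a hypothetical check, not in the patch:]

    static_assert(sizeof(AtomicValue<intptr_t>) == kPointerSize,
                  "kMinHeaderSize accounting assumes word-sized wrappers");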
@@ -674,21 +675,23 @@ class MemoryChunk {
   bool CommitArea(size_t requested);
 
   // Approximate amount of physical memory committed for this chunk.
-  size_t CommittedPhysicalMemory() { return high_water_mark_; }
+  size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
 
   // Should be called when memory chunk is about to be freed.
   void ReleaseAllocatedMemory();
 
   static inline void UpdateHighWaterMark(Address mark) {
-    if (mark == NULL) return;
+    if (mark == nullptr) return;
     // Need to subtract one from the mark because when a chunk is full the
     // top points to the next address after the chunk, which effectively belongs
     // to another chunk. See the comment to Page::FromAllocationTop.
     MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
-    int new_mark = static_cast<int>(mark - chunk->address());
-    if (new_mark > chunk->high_water_mark_) {
-      chunk->high_water_mark_ = new_mark;
-    }
+    intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
+    intptr_t old_mark = 0;
+    do {
+      old_mark = chunk->high_water_mark_.Value();
+    } while ((new_mark > old_mark) &&
+             !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
   }
 
  protected:
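[Note: UpdateHighWaterMark is a lock-free "monotonic maximum": the loop only
retries while our candidate is still larger than the published mark and
another thread won the race in between. The same pattern against std::atomic,
as a self-contained sketch (names are illustrative, not part of the patch):]

    #include <atomic>
    #include <cstdint>

    // Raise `mark` to `candidate` unless a concurrent writer already
    // published something at least as large.
    inline void UpdateMaximum(std::atomic<intptr_t>& mark, intptr_t candidate) {
      intptr_t current = mark.load(std::memory_order_relaxed);
      while (candidate > current &&
             !mark.compare_exchange_weak(current, candidate,
                                         std::memory_order_acq_rel)) {
        // On failure compare_exchange_weak refreshes `current`, so the next
        // iteration re-checks against the value that beat us.
      }
    }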
@@ -719,17 +722,17 @@ class MemoryChunk {
   int progress_bar_;
 
   // Assuming the initial allocation on a page is sequential,
   // count highest number of bytes ever allocated on the page.
-  int high_water_mark_;
+  AtomicValue<intptr_t> high_water_mark_;
   base::Mutex* mutex_;
   base::AtomicWord parallel_sweeping_;
 
   // PagedSpace free-list statistics.
-  int available_in_small_free_list_;
-  int available_in_medium_free_list_;
-  int available_in_large_free_list_;
-  int available_in_huge_free_list_;
-  int non_available_small_blocks_;
+  AtomicNumber<intptr_t> available_in_small_free_list_;
+  AtomicNumber<intptr_t> available_in_medium_free_list_;
+  AtomicNumber<intptr_t> available_in_large_free_list_;
+  AtomicNumber<intptr_t> available_in_huge_free_list_;
+  AtomicNumber<intptr_t> non_available_small_blocks_;
 
   // next_chunk_ holds a pointer of type MemoryChunk
   base::AtomicWord next_chunk_;
@@ -828,21 +831,22 @@ class Page : public MemoryChunk {
   void ResetFreeListStatistics();
 
   int LiveBytesFromFreeList() {
-    return area_size() - non_available_small_blocks_ -
-           available_in_small_free_list_ - available_in_medium_free_list_ -
-           available_in_large_free_list_ - available_in_huge_free_list_;
+    return static_cast<int>(
+        area_size() - non_available_small_blocks() -
+        available_in_small_free_list() - available_in_medium_free_list() -
+        available_in_large_free_list() - available_in_huge_free_list());
   }
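[Note: the subtraction reads off the page-area bookkeeping. Under the
(assumed) invariant that a page's area is partitioned into live bytes,
unusable small blocks, and the per-size-class free lists,

    area_size() == live_bytes
                   + non_available_small_blocks()
                   + available_in_small_free_list()
                   + available_in_medium_free_list()
                   + available_in_large_free_list()
                   + available_in_huge_free_list()

solving for live_bytes yields the expression above. The static_cast<int> is
new because the accessors now return intptr_t rather than int.]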
-#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
-  type name() { return name##_; }                 \
-  void set_##name(type name) { name##_ = name; }  \
-  void add_##name(type name) { name##_ += name; }
+#define FRAGMENTATION_STATS_ACCESSORS(type, name)        \
+  type name() { return name##_.Value(); }                \
+  void set_##name(type name) { name##_.SetValue(name); } \
+  void add_##name(type name) { name##_.Increment(name); }
 
-  FRAGMENTATION_STATS_ACCESSORS(int, non_available_small_blocks)
-  FRAGMENTATION_STATS_ACCESSORS(int, available_in_small_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(int, available_in_medium_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(int, available_in_large_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(int, available_in_huge_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
 
 #undef FRAGMENTATION_STATS_ACCESSORS
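[Note: for reference, one invocation of the macro now expands to the
following accessors, taking
FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
as the example:]

    intptr_t available_in_small_free_list() {
      return available_in_small_free_list_.Value();
    }
    void set_available_in_small_free_list(intptr_t available_in_small_free_list) {
      available_in_small_free_list_.SetValue(available_in_small_free_list);
    }
    void add_available_in_small_free_list(intptr_t available_in_small_free_list) {
      available_in_small_free_list_.Increment(available_in_small_free_list);
    }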
@@ -1128,21 +1132,25 @@ class MemoryAllocator {
   // together.
   void Free(MemoryChunk* chunk);
 
-  // Returns the maximum available bytes of heaps.
-  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
-
   // Returns allocated spaces in bytes.
-  intptr_t Size() { return size_; }
+  intptr_t Size() { return size_.Value(); }
+
+  // Returns allocated executable spaces in bytes.
+  intptr_t SizeExecutable() { return size_executable_.Value(); }
+
+  // Returns the maximum available bytes of heaps.
+  intptr_t Available() {
+    intptr_t size = Size();
[Review thread on `intptr_t size = Size();`]
Hannes Payer (out of office), 2015/09/15 13:35:42:
  drop `intptr_t size = Size();` and make it a one-liner
Michael Lippautz, 2015/09/15 13:44:47:
  As discussed offline, there are 2 uses of the value.
+    return capacity_ < size ? 0 : capacity_ - size;
+  }
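[Note: snapshotting size_ into a local is the point of the thread above.
A one-liner would load size_ twice:

    // racy sketch -- the two .Value() loads may observe different values
    // if another thread allocates in between
    return capacity_ < size_.Value() ? 0 : capacity_ - size_.Value();

in which case the comparison and the subtraction can disagree, so Available()
could return a stale or inconsistent result. Reading Size() once makes both
uses agree.]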
 
   // Returns the maximum available executable bytes of heaps.
   intptr_t AvailableExecutable() {
-    if (capacity_executable_ < size_executable_) return 0;
-    return capacity_executable_ - size_executable_;
+    intptr_t executable_size = SizeExecutable();
+    if (capacity_executable_ < executable_size) return 0;
+    return capacity_executable_ - executable_size;
   }
 
-  // Returns allocated executable spaces in bytes.
-  intptr_t SizeExecutable() { return size_executable_; }
-
   // Returns maximum available bytes that the old space can have.
   intptr_t MaxAvailable() {
     return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
@@ -1150,9 +1158,9 @@ class MemoryAllocator {
 
   // Returns an indication of whether a pointer is in a space that has
   // been allocated by this MemoryAllocator.
-  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
-    return address < lowest_ever_allocated_ ||
-           address >= highest_ever_allocated_;
+  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
+    return address < lowest_ever_allocated_.Value() ||
+           address >= highest_ever_allocated_.Value();
   }
 
 #ifdef DEBUG
@@ -1232,22 +1240,22 @@ class MemoryAllocator {
   Isolate* isolate_;
 
   // Maximum space size in bytes.
-  size_t capacity_;
+  intptr_t capacity_;
   // Maximum subset of capacity_ that can be executable
-  size_t capacity_executable_;
+  intptr_t capacity_executable_;
 
   // Allocated space size in bytes.
-  size_t size_;
+  AtomicNumber<intptr_t> size_;
   // Allocated executable space size in bytes.
-  size_t size_executable_;
+  AtomicNumber<intptr_t> size_executable_;
 
   // We keep the lowest and highest addresses allocated as a quick way
   // of determining that pointers are outside the heap. The estimate is
   // conservative, i.e. not all addrsses in 'allocated' space are allocated
   // to our heap. The range is [lowest, highest[, inclusive on the low end
   // and exclusive on the high end.
-  void* lowest_ever_allocated_;
-  void* highest_ever_allocated_;
+  AtomicValue<void*> lowest_ever_allocated_;
+  AtomicValue<void*> highest_ever_allocated_;
 
   struct MemoryAllocationCallbackRegistration {
     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
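[Note: capacity_ and capacity_executable_ switch from size_t to intptr_t,
presumably so that comparisons like `capacity_ < size` in Available() stay
signed now that the size accessors return intptr_t. Mixing the two invites
the usual signed/unsigned conversion trap:

    size_t capacity = 100;
    intptr_t size = -1;        // illustrative only, not an expected state
    bool b = capacity < size;  // true! size converts to SIZE_MAX

Keeping everything intptr_t sidesteps that class of bug.]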
@@ -1270,8 +1278,16 @@ class MemoryAllocator {
                                        PagedSpace* owner);
 
   void UpdateAllocatedSpaceLimits(void* low, void* high) {
-    lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
-    highest_ever_allocated_ = Max(highest_ever_allocated_, high);
+    // The use of atomic primitives does not guarantee correctness (wrt.
+    // desired semantics) by default. The loop here ensures that we update the
+    // values only if they did not change in between.
+    void* ptr = nullptr;
+    do {
+      ptr = lowest_ever_allocated_.Value();
+    } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
+    do {
+      ptr = highest_ever_allocated_.Value();
+    } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
   }
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
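[Note: UpdateAllocatedSpaceLimits uses the same retry loop as
UpdateHighWaterMark, once per bound and with the comparison flipped. A
hypothetical generalization over std::atomic (not part of the patch) makes
the shared shape explicit:]

    #include <atomic>

    // Publish `candidate` into `slot` only while it improves on the current
    // value according to `better`; the retry loop absorbs concurrent updates.
    template <typename T, typename Compare>
    void UpdateExtremum(std::atomic<T>& slot, T candidate, Compare better) {
      T current = slot.load(std::memory_order_relaxed);
      while (better(candidate, current) &&
             !slot.compare_exchange_weak(current, candidate,
                                         std::memory_order_acq_rel)) {
        // `current` is refreshed by compare_exchange_weak on failure.
      }
    }

    // Usage mirroring the two loops above:
    //   UpdateExtremum(lowest, low,   [](void* a, void* b) { return a < b; });
    //   UpdateExtremum(highest, high, [](void* a, void* b) { return a > b; });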