Index: runtime/vm/zone.h
diff --git a/runtime/vm/zone.h b/runtime/vm/zone.h
index d73398bfe40588de5cf144578152612e4e6889bc..8e67664ac0a823d2f6dab06468662c273e82c017 100644
--- a/runtime/vm/zone.h
+++ b/runtime/vm/zone.h
@@ -64,11 +64,14 @@ class Zone {
   // Compute the total size of this zone. This includes wasted space that is
   // due to internal fragmentation in the segments.
-  intptr_t SizeInBytes() const;
+  uintptr_t SizeInBytes() const;
 
   // Computes the amount of space used in the zone.
   intptr_t CapacityInBytes() const;
 
+  // Computes the amount of space remaining in the zone.
+  intptr_t FreeCapacityInBytes() const;
+
   // Structure for managing handles allocation.
   VMHandles* handles() { return &handles_; }
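
Note: the hunk above only declares FreeCapacityInBytes(); its definition would live in runtime/vm/zone.cc, which this diff does not include. A minimal sketch of one plausible definition, assuming it measures only the segment the bump allocator is currently using (the position_ and limit_ fields that appear in AllocUnsafe further down); the real CL may also count space in other segments:

  // Hypothetical sketch; the actual zone.cc definition is not part of this diff.
  intptr_t Zone::FreeCapacityInBytes() const {
    // Bytes still available in the segment currently being bump-allocated from.
    return limit_ - position_;
  }
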
@@ -76,10 +79,6 @@ class Zone {
   Zone* previous() const { return previous_; }
 
-#ifndef PRODUCT
-  void PrintJSON(JSONStream* stream) const;
-#endif
-
  private:
   Zone();
   ~Zone();  // Delete all memory associated with the zone.
@@ -180,7 +179,7 @@ class StackZone : public StackResource {
   // Compute the total size of this zone. This includes wasted space that is
   // due to internal fragmentation in the segments.
-  intptr_t SizeInBytes() const { return zone_.SizeInBytes(); }
+  uintptr_t SizeInBytes() const { return zone_.SizeInBytes(); }
 
   // Computes the used space in the zone.
   intptr_t CapacityInBytes() const { return zone_.CapacityInBytes(); }
@@ -200,7 +199,6 @@ class StackZone : public StackResource {
 inline uword Zone::AllocUnsafe(intptr_t size) {
   ASSERT(size >= 0);
-
   // Round up the requested size to fit the alignment.
   if (size > (kIntptrMax - kAlignment)) {
     FATAL1("Zone::Alloc: 'size' is too large: size=%" Pd "", size);
@@ -213,6 +211,10 @@ inline uword Zone::AllocUnsafe(intptr_t size) {
   if (free_size >= size) {
     result = position_;
     position_ += size;
+    Thread* current_thread = Thread::Current();

siva (2017/03/22 17:50:55):
This is an overhead on every zone allocation path, ...

bkonyi (2017/03/22 18:01:48):
No, I haven't run a performance test yet. Now that ...

+    if (current_thread != NULL) {

Cutch (2017/03/22 17:21:49):
Refactor this code so there are two helper functions ...

bkonyi (2017/03/22 18:01:48):
Done.

+      current_thread->IncrementMemoryUsage(size);
+    }
   } else {
     result = AllocateExpand(size);
   }
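
Cutch's refactoring request above presumably asks for the thread accounting to be pulled out of the allocation fast path into small helpers. A sketch of that shape with hypothetical names (IncrementThreadMemoryUsage and its decrement counterpart are illustrative, not taken from the landed patch; Thread::DecrementMemoryUsage is likewise assumed to mirror the IncrementMemoryUsage call visible in the diff):

  // Hypothetical sketch; helper names are not from the landed CL.
  inline void Zone::IncrementThreadMemoryUsage(intptr_t size) {
    Thread* current_thread = Thread::Current();
    if (current_thread != NULL) {
      current_thread->IncrementMemoryUsage(size);
    }
  }

  inline void Zone::DecrementThreadMemoryUsage(intptr_t size) {
    Thread* current_thread = Thread::Current();
    if (current_thread != NULL) {
      // Assumes a Thread API mirroring IncrementMemoryUsage above.
      current_thread->DecrementMemoryUsage(size);
    }
  }

With that split, the fast path in AllocUnsafe shrinks to one call, IncrementThreadMemoryUsage(size), right after position_ += size. Note that siva's overhead concern still stands either way: Thread::Current() is a TLS lookup on every allocation.
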
@@ -244,6 +246,7 @@ inline ElementType* Zone::Realloc(ElementType* old_data,
                                   intptr_t old_len,
                                   intptr_t new_len) {
   CheckLength<ElementType>(new_len);
+  Thread* current_thread = Thread::Current();

Cutch (2017/03/22 17:21:50):
move this Thread::Current() call so it only happens ...

bkonyi (2017/03/22 18:01:48):
Done.

   const intptr_t kElementSize = sizeof(ElementType);
   uword old_end = reinterpret_cast<uword>(old_data) + (old_len * kElementSize);
   // Resize existing allocation if nothing was allocated in between...
@@ -252,7 +255,13 @@ inline ElementType* Zone::Realloc(ElementType* old_data,
         reinterpret_cast<uword>(old_data) + (new_len * kElementSize);
     // ...and there is sufficient space.
     if (new_end <= limit_) {
+      ASSERT(new_len >= old_len);
+      uword previous = position_;
       position_ = Utils::RoundUp(new_end, kAlignment);
+      if (current_thread != NULL && previous != position_) {

Cutch (2017/03/22 17:21:50):
Wrap each of these != expressions in parens.

bkonyi (2017/03/22 18:01:48):
Acknowledged.

+        current_thread->IncrementMemoryUsage((new_len - old_len) *
+                                             kElementSize);
+      }
       return old_data;
     }
   }
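
Applying Cutch's two Realloc comments would presumably reshape the in-place resize branch roughly as below: the Thread::Current() lookup moves inside the branch where the accounting actually happens, and splitting the compound condition into nested ifs also resolves the parenthesization note, since the two != expressions no longer share one condition. A sketch, not the landed code:

    // Sketch only; not necessarily identical to the landed patch.
    if (new_end <= limit_) {
      ASSERT(new_len >= old_len);
      uword previous = position_;
      position_ = Utils::RoundUp(new_end, kAlignment);
      if (previous != position_) {
        // Look up the thread only when the used size actually changed.
        Thread* current_thread = Thread::Current();
        if (current_thread != NULL) {
          current_thread->IncrementMemoryUsage((new_len - old_len) *
                                               kElementSize);
        }
      }
      return old_data;
    }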