Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/zone.h" | 5 #include "vm/zone.h" |
| 6 | 6 |
| 7 #include "platform/assert.h" | 7 #include "platform/assert.h" |
| 8 #include "platform/utils.h" | 8 #include "platform/utils.h" |
| 9 #include "vm/dart_api_state.h" | 9 #include "vm/dart_api_state.h" |
| 10 #include "vm/flags.h" | 10 #include "vm/flags.h" |
| (...skipping 12 matching lines...) | |
| 23 intptr_t size() const { return size_; } | 23 intptr_t size() const { return size_; } |
| 24 | 24 |
| 25 uword start() { return address(sizeof(Segment)); } | 25 uword start() { return address(sizeof(Segment)); } |
| 26 uword end() { return address(size_); } | 26 uword end() { return address(size_); } |
| 27 | 27 |
| 28 // Allocate or delete individual segments. | 28 // Allocate or delete individual segments. |
| 29 static Segment* New(intptr_t size, Segment* next); | 29 static Segment* New(intptr_t size, Segment* next); |
| 30 static void DeleteSegmentList(Segment* segment); | 30 static void DeleteSegmentList(Segment* segment); |
| 31 | 31 |
| 32 private: | 32 private: |
| | 33 static void IncrementMemoryCapacity(uintptr_t size); |
| | 34 static void DecrementMemoryCapacity(uintptr_t size); |

siva (2017/03/22 23:36:06): The two versions of IncrementMemoryCapacity/Decrem

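The review comment above is truncated in this capture; assuming it refers to the near-identical Increment/DecrementMemoryCapacity bodies that appear twice in this CL (once on Zone::Segment, once on Zone further down), here is a minimal standalone sketch of how the four bodies could share one signed helper. `Thread` and `ApiNativeScope` below are toy stand-ins for the VM types, and `AdjustMemoryCapacity` is a hypothetical name, not VM API:

```cpp
// Standalone sketch, not part of the CL. The toy types model only the
// accounting pattern: a per-thread counter with a native-scope fallback.
#include <cstdint>
#include <cstdio>

struct Thread {
  static Thread* Current() { return current_; }
  void AdjustMemoryCapacity(intptr_t delta) { capacity_ += delta; }
  intptr_t capacity_ = 0;
  static Thread* current_;
};
Thread* Thread::current_ = nullptr;

struct ApiNativeScope {
  static bool InScope() { return in_scope_; }
  static void AdjustNativeScopeMemoryCapacity(intptr_t delta) {
    capacity_ += delta;
  }
  static bool in_scope_;
  static intptr_t capacity_;
};
bool ApiNativeScope::in_scope_ = false;
intptr_t ApiNativeScope::capacity_ = 0;

// One signed helper (hypothetical) in place of the four duplicated bodies:
// callers pass +size when a segment is allocated, -size when it is freed.
static void AdjustMemoryCapacity(intptr_t delta) {
  Thread* thread = Thread::Current();
  if (thread != nullptr) {
    thread->AdjustMemoryCapacity(delta);
  } else if (ApiNativeScope::InScope()) {
    // If there is no current thread, we might be inside of a native scope.
    ApiNativeScope::AdjustNativeScopeMemoryCapacity(delta);
  }
}

int main() {
  Thread main_thread;
  Thread::current_ = &main_thread;
  AdjustMemoryCapacity(+4096);  // as Segment::New would on allocation
  AdjustMemoryCapacity(-4096);  // as DeleteSegmentList would on teardown
  std::printf("capacity after balanced calls: %ld\n",
              static_cast<long>(main_thread.capacity_));
  return 0;
}
```

Whether such a helper would live on Zone, on Segment, or at namespace scope is the reviewer's call; the sketch only shows that a sign parameter removes the duplication.
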
| | 35 |
| 33 Segment* next_; | 36 Segment* next_; |
| 34 intptr_t size_; | 37 intptr_t size_; |
| 35 | 38 |
| 36 // Computes the address of the nth byte in this segment. | 39 // Computes the address of the nth byte in this segment. |
| 37 uword address(int n) { return reinterpret_cast<uword>(this) + n; } | 40 uword address(int n) { return reinterpret_cast<uword>(this) + n; } |
| 38 | 41 |
| 39 static void Delete(Segment* segment) { free(segment); } | 42 static void Delete(Segment* segment) { free(segment); } |
| 40 | 43 |
| 41 DISALLOW_IMPLICIT_CONSTRUCTORS(Segment); | 44 DISALLOW_IMPLICIT_CONSTRUCTORS(Segment); |
| 42 }; | 45 }; |
| 43 | 46 |
| 44 | 47 |
| | 48 Zone::Segment* Zone::Segment::New(intptr_t size, Zone::Segment* next) { |
| | 49 ASSERT(size >= 0); |
| | 50 Segment* result = reinterpret_cast<Segment*>(malloc(size)); |
| | 51 if (result == NULL) { |
| | 52 OUT_OF_MEMORY(); |
| | 53 } |
| | 54 ASSERT(Utils::IsAligned(result->start(), Zone::kAlignment)); |
| | 55 #ifdef DEBUG |
| | 56 // Zap the entire allocated segment (including the header). |
| | 57 memset(result, kZapUninitializedByte, size); |
| | 58 #endif |
| | 59 result->next_ = next; |
| | 60 result->size_ = size; |
| | 61 IncrementMemoryCapacity(size); |
| | 62 return result; |
| | 63 } |
| | 64 |
| | 65 |
| 45 void Zone::Segment::DeleteSegmentList(Segment* head) { | 66 void Zone::Segment::DeleteSegmentList(Segment* head) { |
| 46 Segment* current = head; | 67 Segment* current = head; |
| 47 Thread* current_thread = Thread::Current(); | |
| 48 while (current != NULL) { | 68 while (current != NULL) { |
| 49 if (current_thread != NULL) { | 69 DecrementMemoryCapacity(current->size()); |
| 50 current_thread->DecrementMemoryUsage(current->size()); | |
| 51 } else if (ApiNativeScope::Current() != NULL) { | |
| 52 // If there is no current thread, we might be inside of a native scope. | |
| 53 ApiNativeScope::DecrementNativeScopeMemoryUsage(current->size()); | |
| 54 } | |
| 55 Segment* next = current->next(); | 70 Segment* next = current->next(); |
| 56 #ifdef DEBUG | 71 #ifdef DEBUG |
| 57 // Zap the entire current segment (including the header). | 72 // Zap the entire current segment (including the header). |
| 58 memset(current, kZapDeletedByte, current->size()); | 73 memset(current, kZapDeletedByte, current->size()); |
| 59 #endif | 74 #endif |
| 60 Segment::Delete(current); | 75 Segment::Delete(current); |
| 61 current = next; | 76 current = next; |
| 62 } | 77 } |
| 63 } | 78 } |
| 64 | 79 |
| 65 | 80 |
| 66 Zone::Segment* Zone::Segment::New(intptr_t size, Zone::Segment* next) { | 81 void Zone::Segment::IncrementMemoryCapacity(uintptr_t size) { |
| 67 ASSERT(size >= 0); | 82 Thread* current_thread = Thread::Current(); |
| 68 Segment* result = reinterpret_cast<Segment*>(malloc(size)); | 83 if (current_thread != NULL) { |
| 69 if (result == NULL) { | 84 current_thread->IncrementMemoryCapacity(size); |
| 70 OUT_OF_MEMORY(); | |
| 71 } | |
| 72 ASSERT(Utils::IsAligned(result->start(), Zone::kAlignment)); | |
| 73 #ifdef DEBUG | |
| 74 // Zap the entire allocated segment (including the header). | |
| 75 memset(result, kZapUninitializedByte, size); | |
| 76 #endif | |
| 77 result->next_ = next; | |
| 78 result->size_ = size; | |
| 79 Thread* current = Thread::Current(); | |
| 80 if (current != NULL) { | |
| 81 current->IncrementMemoryUsage(size); | |
| 82 } else if (ApiNativeScope::Current() != NULL) { | 85 } else if (ApiNativeScope::Current() != NULL) { |
| 83 // If there is no current thread, we might be inside of a native scope. | 86 // If there is no current thread, we might be inside of a native scope. |
| 84 ApiNativeScope::IncrementNativeScopeMemoryUsage(size); | 87 ApiNativeScope::IncrementNativeScopeMemoryCapacity(size); |
| 85 } | 88 } |
| 86 return result; | |
| 87 } | 89 } |
| 88 | 90 |
| | 91 |
| | 92 void Zone::Segment::DecrementMemoryCapacity(uintptr_t size) { |
| | 93 Thread* current_thread = Thread::Current(); |
| | 94 if (current_thread != NULL) { |
| | 95 current_thread->DecrementMemoryCapacity(size); |
| | 96 } else if (ApiNativeScope::Current() != NULL) { |
| | 97 // If there is no current thread, we might be inside of a native scope. |
| | 98 ApiNativeScope::DecrementNativeScopeMemoryCapacity(size); |
| | 99 } |
| | 100 } |
| | 101 |
| | 102 |
| 89 // TODO(bkonyi): We need to account for the initial chunk size when a new zone | 103 // TODO(bkonyi): We need to account for the initial chunk size when a new zone |
| 90 // is created within a new thread or ApiNativeScope when calculating high | 104 // is created within a new thread or ApiNativeScope when calculating high |
| 91 // watermarks or memory consumption. | 105 // watermarks or memory consumption. |
| 92 Zone::Zone() | 106 Zone::Zone() |
| 93 : initial_buffer_(buffer_, kInitialChunkSize), | 107 : initial_buffer_(buffer_, kInitialChunkSize), |
| 94 position_(initial_buffer_.start()), | 108 position_(initial_buffer_.start()), |
| 95 limit_(initial_buffer_.end()), | 109 limit_(initial_buffer_.end()), |
| 96 head_(NULL), | 110 head_(NULL), |
| 97 large_segments_(NULL), | 111 large_segments_(NULL), |
| 98 handles_(), | 112 handles_(), |
| 99 previous_(NULL) { | 113 previous_(NULL) { |
| 100 ASSERT(Utils::IsAligned(position_, kAlignment)); | 114 ASSERT(Utils::IsAligned(position_, kAlignment)); |
| 101 Thread* current = Thread::Current(); | 115 IncrementMemoryCapacity(kInitialChunkSize); |
| 102 if (current != NULL) { | |
| 103 current->IncrementMemoryUsage(kInitialChunkSize); | |
| 104 } | |
| 105 #ifdef DEBUG | 116 #ifdef DEBUG |
| 106 // Zap the entire initial buffer. | 117 // Zap the entire initial buffer. |
| 107 memset(initial_buffer_.pointer(), kZapUninitializedByte, | 118 memset(initial_buffer_.pointer(), kZapUninitializedByte, |
| 108 initial_buffer_.size()); | 119 initial_buffer_.size()); |
| 109 #endif | 120 #endif |
| 110 } | 121 } |
| 111 | 122 |
| 112 | 123 |
| 113 Zone::~Zone() { | 124 Zone::~Zone() { |
| 114 if (FLAG_trace_zones) { | 125 if (FLAG_trace_zones) { |
| 115 DumpZoneSizes(); | 126 DumpZoneSizes(); |
| 116 } | 127 } |
| 117 Thread* current = Thread::Current(); | |
| 118 if (current != NULL) { | |
| 119 current->DecrementMemoryUsage(kInitialChunkSize); | |
| 120 } | |
| 121 DeleteAll(); | 128 DeleteAll(); |
| | 129 DecrementMemoryCapacity(kInitialChunkSize); |
| 122 } | 130 } |
| 123 | 131 |
| 124 | 132 |
| 125 void Zone::DeleteAll() { | 133 void Zone::DeleteAll() { |
| 126 // Traverse the chained list of segments, zapping (in debug mode) | 134 // Traverse the chained list of segments, zapping (in debug mode) |
| 127 // and freeing every zone segment. | 135 // and freeing every zone segment. |
| 128 if (head_ != NULL) { | 136 if (head_ != NULL) { |
| 129 Segment::DeleteSegmentList(head_); | 137 Segment::DeleteSegmentList(head_); |
| 130 } | 138 } |
| 131 if (large_segments_ != NULL) { | 139 if (large_segments_ != NULL) { |
| 132 Segment::DeleteSegmentList(large_segments_); | 140 Segment::DeleteSegmentList(large_segments_); |
| 133 } | 141 } |
| 134 // Reset zone state. | 142 // Reset zone state. |
| 135 #ifdef DEBUG | 143 #ifdef DEBUG |
| 136 memset(initial_buffer_.pointer(), kZapDeletedByte, initial_buffer_.size()); | 144 memset(initial_buffer_.pointer(), kZapDeletedByte, initial_buffer_.size()); |
| 137 #endif | 145 #endif |
| 138 position_ = initial_buffer_.start(); | 146 position_ = initial_buffer_.start(); |
| 139 limit_ = initial_buffer_.end(); | 147 limit_ = initial_buffer_.end(); |
| 140 head_ = NULL; | 148 head_ = NULL; |
| 141 large_segments_ = NULL; | 149 large_segments_ = NULL; |
| 142 previous_ = NULL; | 150 previous_ = NULL; |
| 143 handles_.Reset(); | 151 handles_.Reset(); |
| 144 } | 152 } |
| 145 | 153 |
| 146 | 154 |
| 147 intptr_t Zone::SizeInBytes() const { | 155 uintptr_t Zone::SizeInBytes() const { |
| 148 intptr_t size = 0; | 156 uintptr_t size = 0; |
| 149 for (Segment* s = large_segments_; s != NULL; s = s->next()) { | 157 for (Segment* s = large_segments_; s != NULL; s = s->next()) { |
| 150 size += s->size(); | 158 size += s->size(); |
| 151 } | 159 } |
| 152 if (head_ == NULL) { | 160 if (head_ == NULL) { |
| 153 return size + (position_ - initial_buffer_.start()); | 161 return size + (position_ - initial_buffer_.start()); |
| 154 } | 162 } |
| 155 size += initial_buffer_.size(); | 163 size += initial_buffer_.size(); |
| 156 for (Segment* s = head_->next(); s != NULL; s = s->next()) { | 164 for (Segment* s = head_->next(); s != NULL; s = s->next()) { |
| 157 size += s->size(); | 165 size += s->size(); |
| 158 } | 166 } |
| 159 return size + (position_ - head_->start()); | 167 return size + (position_ - head_->start()); |
| 160 } | 168 } |
| 161 | 169 |
| 162 | 170 |
| 163 intptr_t Zone::CapacityInBytes() const { | 171 uintptr_t Zone::CapacityInBytes() const { |
| 164 intptr_t size = 0; | 172 uintptr_t size = 0; |
| 165 for (Segment* s = large_segments_; s != NULL; s = s->next()) { | 173 for (Segment* s = large_segments_; s != NULL; s = s->next()) { |
| 166 size += s->size(); | 174 size += s->size(); |
| 167 } | 175 } |
| 168 if (head_ == NULL) { | 176 if (head_ == NULL) { |
| 169 return size + initial_buffer_.size(); | 177 return size + initial_buffer_.size(); |
| 170 } | 178 } |
| 171 size += initial_buffer_.size(); | 179 size += initial_buffer_.size(); |
| 172 for (Segment* s = head_; s != NULL; s = s->next()) { | 180 for (Segment* s = head_; s != NULL; s = s->next()) { |
| 173 size += s->size(); | 181 size += s->size(); |
| 174 } | 182 } |
| 175 return size; | 183 return size; |
| 176 } | 184 } |
| 177 | 185 |
| 178 | 186 |
| | 187 void Zone::IncrementMemoryCapacity(uintptr_t size) { |
| | 188 Thread* current_thread = Thread::Current(); |
| | 189 if (current_thread != NULL) { |
| | 190 current_thread->IncrementMemoryCapacity(size); |
| | 191 } else if (ApiNativeScope::Current() != NULL) { |
| | 192 // If there is no current thread, we might be inside of a native scope. |
| | 193 ApiNativeScope::IncrementNativeScopeMemoryCapacity(size); |
| | 194 } |
| | 195 } |
| | 196 |
| | 197 |
| | 198 void Zone::DecrementMemoryCapacity(uintptr_t size) { |
| | 199 Thread* current_thread = Thread::Current(); |
| | 200 if (current_thread != NULL) { |
| | 201 current_thread->DecrementMemoryCapacity(size); |
| | 202 } else if (ApiNativeScope::Current() != NULL) { |
| | 203 // If there is no current thread, we might be inside of a native scope. |
| | 204 ApiNativeScope::DecrementNativeScopeMemoryCapacity(size); |
| | 205 } |
| | 206 } |
| | 207 |
| | 208 |
| 179 uword Zone::AllocateExpand(intptr_t size) { | 209 uword Zone::AllocateExpand(intptr_t size) { |
| 180 ASSERT(size >= 0); | 210 ASSERT(size >= 0); |
| 181 if (FLAG_trace_zones) { | 211 if (FLAG_trace_zones) { |
| 182 OS::PrintErr("*** Expanding zone 0x%" Px "\n", | 212 OS::PrintErr("*** Expanding zone 0x%" Px "\n", |
| 183 reinterpret_cast<intptr_t>(this)); | 213 reinterpret_cast<intptr_t>(this)); |
| 184 DumpZoneSizes(); | 214 DumpZoneSizes(); |
| 185 } | 215 } |
| 186 // Make sure the requested size is already properly aligned and that | 216 // Make sure the requested size is already properly aligned and that |
| 187 // there isn't enough room in the Zone to satisfy the request. | 217 // there isn't enough room in the Zone to satisfy the request. |
| 188 ASSERT(Utils::IsAligned(size, kAlignment)); | 218 ASSERT(Utils::IsAligned(size, kAlignment)); |
| (...skipping 104 matching lines...) | |
| 293 va_end(args); | 323 va_end(args); |
| 294 return buffer; | 324 return buffer; |
| 295 } | 325 } |
| 296 | 326 |
| 297 | 327 |
| 298 char* Zone::VPrint(const char* format, va_list args) { | 328 char* Zone::VPrint(const char* format, va_list args) { |
| 299 return OS::VSCreate(this, format, args); | 329 return OS::VSCreate(this, format, args); |
| 300 } | 330 } |
| 301 | 331 |
| 302 | 332 |
| 303 #ifndef PRODUCT | |
| 304 // TODO(bkonyi): Currently dead code. See issue #28885. | |
| 305 void Zone::PrintJSON(JSONStream* stream) const { | |
| 306 JSONObject jsobj(stream); | |
| 307 intptr_t capacity = CapacityInBytes(); | |
| 308 intptr_t used_size = SizeInBytes(); | |
| 309 jsobj.AddProperty("type", "_Zone"); | |
| 310 jsobj.AddProperty("capacity", capacity); | |
| 311 jsobj.AddProperty("used", used_size); | |
| 312 } | |
| 313 #endif | |
| 314 | |
| 315 | |
| 316 StackZone::StackZone(Thread* thread) : StackResource(thread), zone_() { | 333 StackZone::StackZone(Thread* thread) : StackResource(thread), zone_() { |
| 317 if (FLAG_trace_zones) { | 334 if (FLAG_trace_zones) { |
| 318 OS::PrintErr("*** Starting a new Stack zone 0x%" Px "(0x%" Px ")\n", | 335 OS::PrintErr("*** Starting a new Stack zone 0x%" Px "(0x%" Px ")\n", |
| 319 reinterpret_cast<intptr_t>(this), | 336 reinterpret_cast<intptr_t>(this), |
| 320 reinterpret_cast<intptr_t>(&zone_)); | 337 reinterpret_cast<intptr_t>(&zone_)); |
| 321 } | 338 } |
| 322 zone_.Link(thread->zone()); | 339 zone_.Link(thread->zone()); |
| 323 thread->set_zone(&zone_); | 340 thread->set_zone(&zone_); |
| 324 } | 341 } |
| 325 | 342 |
| 326 | 343 |
| 327 StackZone::~StackZone() { | 344 StackZone::~StackZone() { |
| 328 ASSERT(thread()->zone() == &zone_); | 345 ASSERT(thread()->zone() == &zone_); |
| 329 thread()->set_zone(zone_.previous_); | 346 thread()->set_zone(zone_.previous_); |
| 330 if (FLAG_trace_zones) { | 347 if (FLAG_trace_zones) { |
| 331 OS::PrintErr("*** Deleting Stack zone 0x%" Px "(0x%" Px ")\n", | 348 OS::PrintErr("*** Deleting Stack zone 0x%" Px "(0x%" Px ")\n", |
| 332 reinterpret_cast<intptr_t>(this), | 349 reinterpret_cast<intptr_t>(this), |
| 333 reinterpret_cast<intptr_t>(&zone_)); | 350 reinterpret_cast<intptr_t>(&zone_)); |
| 334 } | 351 } |
| 335 } | 352 } |
| 336 | 353 |
| 337 } // namespace dart | 354 } // namespace dart |
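
For context on the accounting this CL reports: `SizeInBytes` counts only the bytes handed out from the zone, while `CapacityInBytes` counts every byte reserved in segments, and it is the capacity figure that feeds the new watermark tracking. A minimal standalone sketch of that distinction, using a toy bump allocator rather than the VM's `Zone`:

```cpp
// Toy illustration (not VM code) of size vs. capacity in a bump allocator:
// capacity is the whole reserved segment, size is only the bytes handed out.
#include <cstdio>
#include <cstdlib>

class ToyZone {
 public:
  explicit ToyZone(size_t segment_size)
      : base_(static_cast<char*>(std::malloc(segment_size))),
        position_(base_),
        limit_(base_ + segment_size) {}
  ~ToyZone() { std::free(base_); }

  // Bump-pointer allocation within the single segment.
  void* Allocate(size_t size) {
    if (position_ + size > limit_) return nullptr;  // would need a new segment
    void* result = position_;
    position_ += size;
    return result;
  }

  // Bytes actually handed out to callers.
  size_t SizeInBytes() const { return position_ - base_; }
  // Bytes reserved from the system; what capacity tracking would record.
  size_t CapacityInBytes() const { return limit_ - base_; }

 private:
  char* base_;
  char* position_;
  char* limit_;
};

int main() {
  ToyZone zone(4096);
  zone.Allocate(100);
  zone.Allocate(28);
  std::printf("size=%zu capacity=%zu\n", zone.SizeInBytes(),
              zone.CapacityInBytes());  // prints: size=128 capacity=4096
  return 0;
}
```

With a 4 KB segment and 128 bytes allocated, size is 128 while capacity stays 4096, which is why the CL adjusts capacity at segment granularity (in `Segment::New`, `DeleteSegmentList`, and the `Zone` constructor/destructor) rather than per allocation.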