Chromium Code Reviews

Unified Diff: runtime/vm/zone.cc

Issue 2762323002: Reimplemented zone memory tracking to avoid race conditions that were causing crashes in the previo… (Closed)
Patch Set: Reimplemented zone memory tracking to avoid race conditions that were causing crashes in the previo… Created 3 years, 9 months ago
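For readers skimming the diff: the race condition the issue title refers to came from zone memory counters shared across threads. A minimal sketch of the failure mode and of the per-thread accounting this CL switches to (illustrative only; the names below are hypothetical, not VM code):

    // Racy: one counter shared by every thread, updated without
    // synchronization. Concurrent zones can lose or corrupt updates.
    static long g_zone_bytes = 0;
    void OnSegmentAllocated(long size) {
      g_zone_bytes += size;  // unsynchronized read-modify-write
    }

    // Race-free: each thread accounts only for its own zones, which is
    // what hanging the counters off Thread::Current() achieves below.
    thread_local long tl_zone_bytes = 0;
    void OnSegmentAllocatedTracked(long size) {
      tl_zone_bytes += size;  // only the owning thread touches this
    }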
 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.

 #include "vm/zone.h"

 #include "platform/assert.h"
 #include "platform/utils.h"
 #include "vm/dart_api_state.h"
 #include "vm/flags.h"
(...skipping 29 matching lines...)

   DISALLOW_IMPLICIT_CONSTRUCTORS(Segment);
 };


 void Zone::Segment::DeleteSegmentList(Segment* head) {
   Segment* current = head;
   Thread* current_thread = Thread::Current();
   while (current != NULL) {
     if (current_thread != NULL) {
-      current_thread->DecrementMemoryUsage(current->size());
+      current_thread->DecrementMemoryCapacity(current->size());
Cutch 2017/03/22 17:21:49 ditto here: IncrementThreadMemoryCapacity / Decre
bkonyi 2017/03/22 18:01:48 Done.
     } else if (ApiNativeScope::Current() != NULL) {
       // If there is no current thread, we might be inside of a native scope.
-      ApiNativeScope::DecrementNativeScopeMemoryUsage(current->size());
+      ApiNativeScope::DecrementNativeScopeMemoryCapacity(current->size());
     }
     Segment* next = current->next();
 #ifdef DEBUG
     // Zap the entire current segment (including the header).
     memset(current, kZapDeletedByte, current->size());
 #endif
     Segment::Delete(current);
     current = next;
   }
 }


 Zone::Segment* Zone::Segment::New(intptr_t size, Zone::Segment* next) {
   ASSERT(size >= 0);
   Segment* result = reinterpret_cast<Segment*>(malloc(size));
   if (result == NULL) {
     OUT_OF_MEMORY();
   }
   ASSERT(Utils::IsAligned(result->start(), Zone::kAlignment));
 #ifdef DEBUG
   // Zap the entire allocated segment (including the header).
   memset(result, kZapUninitializedByte, size);
 #endif
   result->next_ = next;
   result->size_ = size;
-  Thread* current = Thread::Current();
-  if (current != NULL) {
-    current->IncrementMemoryUsage(size);
+  Thread* current_thread = Thread::Current();
+  if (current_thread != NULL) {
+    current_thread->IncrementMemoryCapacity(size);
   } else if (ApiNativeScope::Current() != NULL) {
     // If there is no current thread, we might be inside of a native scope.
-    ApiNativeScope::IncrementNativeScopeMemoryUsage(size);
+    ApiNativeScope::IncrementNativeScopeMemoryCapacity(size);
   }
   return result;
 }

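The malloc'd block in Segment::New() holds the bookkeeping header followed by the usable memory; a rough layout sketch (field placement assumed from the assignments above):

    // [ Segment header: next_, size_ | usable zone memory .............. ]
    // ^result                        ^result->start()      ^result + size
    //
    // size_ records the whole block, header included, which is why the
    // debug zap covers the header, why DeleteSegmentList() frees starting
    // at the header, and why AllocateLargeSegment() below adds
    // sizeof(Segment) to each request before calling Segment::New().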
 // TODO(bkonyi): We need to account for the initial chunk size when a new zone
 // is created within a new thread or ApiNativeScope when calculating high
 // watermarks or memory consumption.
 Zone::Zone()
     : initial_buffer_(buffer_, kInitialChunkSize),
       position_(initial_buffer_.start()),
       limit_(initial_buffer_.end()),
       head_(NULL),
       large_segments_(NULL),
       handles_(),
       previous_(NULL) {
   ASSERT(Utils::IsAligned(position_, kAlignment));
-  Thread* current = Thread::Current();
-  if (current != NULL) {
-    current->IncrementMemoryUsage(kInitialChunkSize);
+  Thread* current_thread = Thread::Current();
+  if (current_thread != NULL) {
+    current_thread->IncrementMemoryCapacity(kInitialChunkSize);
   }
 #ifdef DEBUG
   // Zap the entire initial buffer.
   memset(initial_buffer_.pointer(), kZapUninitializedByte,
          initial_buffer_.size());
 #endif
 }


 Zone::~Zone() {
   if (FLAG_trace_zones) {
     DumpZoneSizes();
   }
-  Thread* current = Thread::Current();
-  if (current != NULL) {
-    current->DecrementMemoryUsage(kInitialChunkSize);
-  }
-  DeleteAll();
+  DeleteAll();
+
+  Thread* current_thread = Thread::Current();
+  if (current_thread != NULL) {
+    current_thread->DecrementMemoryCapacity(kInitialChunkSize);
+  }
 }

 void Zone::DeleteAll() {
+  Thread* current_thread = Thread::Current();
+  if (current_thread != NULL) {
+    current_thread->DecrementMemoryUsage(SizeInBytes());
+  }
   // Traverse the chained list of segments, zapping (in debug mode)
   // and freeing every zone segment.
   if (head_ != NULL) {
     Segment::DeleteSegmentList(head_);
   }
   if (large_segments_ != NULL) {
     Segment::DeleteSegmentList(large_segments_);
   }
   // Reset zone state.
 #ifdef DEBUG
   memset(initial_buffer_.pointer(), kZapDeletedByte, initial_buffer_.size());
 #endif
   position_ = initial_buffer_.start();
   limit_ = initial_buffer_.end();
   head_ = NULL;
   large_segments_ = NULL;
   previous_ = NULL;
   handles_.Reset();
 }

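Two distinct counters are in play after this patch: capacity (all memory a zone holds) moves in Segment::New(), DeleteSegmentList(), and the Zone constructor/destructor, while usage (bytes actually handed out) moves in the allocation paths and is settled here in DeleteAll(). A hypothetical lifetime trace showing the pairing (not VM output):

    // Zone()                   -> IncrementMemoryCapacity(kInitialChunkSize)
    // Segment::New(...)        -> IncrementMemoryCapacity(segment size)
    // AllocateExpand(...)      -> IncrementMemoryUsage(wasted tail + size)
    // AllocateLargeSegment(...)-> IncrementMemoryUsage(size + header)
    // DeleteAll()              -> DecrementMemoryUsage(SizeInBytes())
    //   DeleteSegmentList()    -> DecrementMemoryCapacity(each segment size)
    // ~Zone()                  -> DecrementMemoryCapacity(kInitialChunkSize)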
-intptr_t Zone::SizeInBytes() const {
-  intptr_t size = 0;
+uintptr_t Zone::SizeInBytes() const {
+  uintptr_t size = 0;
   for (Segment* s = large_segments_; s != NULL; s = s->next()) {
     size += s->size();
   }
   if (head_ == NULL) {
     return size + (position_ - initial_buffer_.start());
   }
   size += initial_buffer_.size();
   for (Segment* s = head_->next(); s != NULL; s = s->next()) {
     size += s->size();
   }
(...skipping 10 matching lines...)
     return size + initial_buffer_.size();
   }
   size += initial_buffer_.size();
   for (Segment* s = head_; s != NULL; s = s->next()) {
     size += s->size();
   }
   return size;
 }

+intptr_t Zone::FreeCapacityInBytes() const {
+  if (head_ == NULL) {
+    return initial_buffer_.start() + initial_buffer_.size() - position_;
+  } else {
+    return head_->start() + head_->size() - position_;
+  }
+}
+
+
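The new FreeCapacityInBytes() measures the untouched tail of the buffer currently being bump-allocated from: the initial on-stack chunk before any segment exists, otherwise the head segment. A sketch of the initial-buffer branch:

    // |<---------- initial_buffer_.size() ---------->|
    // [ already handed out ........ | free tail .... ]
    //  ^start()                      ^position_
    //
    // free = initial_buffer_.start() + initial_buffer_.size() - position_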
 uword Zone::AllocateExpand(intptr_t size) {
   ASSERT(size >= 0);
   if (FLAG_trace_zones) {
     OS::PrintErr("*** Expanding zone 0x%" Px "\n",
                  reinterpret_cast<intptr_t>(this));
     DumpZoneSizes();
   }
   // Make sure the requested size is already properly aligned and that
   // there isn't enough room in the Zone to satisfy the request.
   ASSERT(Utils::IsAligned(size, kAlignment));
   intptr_t free_size = (limit_ - position_);
   ASSERT(free_size < size);

   // First check to see if we should just chain it as a large segment.
   intptr_t max_size =
       Utils::RoundDown(kSegmentSize - sizeof(Segment), kAlignment);
   ASSERT(max_size > 0);
   if (size > max_size) {
     return AllocateLargeSegment(size);
   }

+  intptr_t remaining_capacity = FreeCapacityInBytes();
+
   // Allocate another segment and chain it up.
   head_ = Segment::New(kSegmentSize, head_);

+  Thread* current_thread = Thread::Current();
+  if (current_thread != NULL) {
+    current_thread->IncrementMemoryUsage(remaining_capacity + size);
+  }
+
   // Recompute 'position' and 'limit' based on the new head segment.
   uword result = Utils::RoundUp(head_->start(), kAlignment);
   position_ = result + size;
   limit_ = head_->end();
   ASSERT(position_ <= limit_);
   return result;
 }

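The usage bump of remaining_capacity + size reflects that chaining a fresh segment abandons the free tail of the old buffer; those bytes can never be handed out again, so they are charged as used. A worked example with assumed numbers:

    // Free tail = 96 bytes; request size = 256 bytes.
    // 256 > 96, so a new segment is chained and the 96-byte tail is
    // orphaned. Usage therefore grows by 96 + 256 = 352 bytes, not 256.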
 uword Zone::AllocateLargeSegment(intptr_t size) {
   ASSERT(size >= 0);
   // Make sure the requested size is already properly aligned and that
   // there isn't enough room in the Zone to satisfy the request.
   ASSERT(Utils::IsAligned(size, kAlignment));
   intptr_t free_size = (limit_ - position_);
   ASSERT(free_size < size);

   // Create a new large segment and chain it up.
   ASSERT(Utils::IsAligned(sizeof(Segment), kAlignment));
   size += sizeof(Segment);  // Account for book keeping fields in size.
   large_segments_ = Segment::New(size, large_segments_);

+  Thread* current_thread = Thread::Current();
+  if (current_thread != NULL) {
+    current_thread->IncrementMemoryUsage(size);
+  }
+
   uword result = Utils::RoundUp(large_segments_->start(), kAlignment);
   return result;
 }

 char* Zone::MakeCopyOfString(const char* str) {
   intptr_t len = strlen(str) + 1;  // '\0'-terminated.
   char* copy = Alloc<char>(len);
   strncpy(copy, str, len);
   return copy;
(...skipping 58 matching lines...)
   va_end(args);
   return buffer;
 }


 char* Zone::VPrint(const char* format, va_list args) {
   return OS::VSCreate(this, format, args);
 }

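These string helpers hand back zone-owned storage, so callers never free it explicitly. A usage sketch (call sites assumed, not taken from this CL):

    // char* copy = zone->MakeCopyOfString("abc");  // 4 bytes in the zone
    // char* text = zone->VPrint(format, args);     // printf-style, zone-backed
    // Both pointers stay valid until the zone is destroyed; no free()
    // or delete is ever called on them.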
-#ifndef PRODUCT
-// TODO(bkonyi): Currently dead code. See issue #28885.
-void Zone::PrintJSON(JSONStream* stream) const {
-  JSONObject jsobj(stream);
-  intptr_t capacity = CapacityInBytes();
-  intptr_t used_size = SizeInBytes();
-  jsobj.AddProperty("type", "_Zone");
-  jsobj.AddProperty("capacity", capacity);
-  jsobj.AddProperty("used", used_size);
-}
-#endif
-
-
 StackZone::StackZone(Thread* thread) : StackResource(thread), zone_() {
   if (FLAG_trace_zones) {
     OS::PrintErr("*** Starting a new Stack zone 0x%" Px "(0x%" Px ")\n",
                  reinterpret_cast<intptr_t>(this),
                  reinterpret_cast<intptr_t>(&zone_));
   }
   zone_.Link(thread->zone());
   thread->set_zone(&zone_);
 }


 StackZone::~StackZone() {
   ASSERT(thread()->zone() == &zone_);
   thread()->set_zone(zone_.previous_);
   if (FLAG_trace_zones) {
     OS::PrintErr("*** Deleting Stack zone 0x%" Px "(0x%" Px ")\n",
                  reinterpret_cast<intptr_t>(this),
                  reinterpret_cast<intptr_t>(&zone_));
   }
 }

 }  // namespace dart
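StackZone is the RAII entry point for all of the above: its constructor links a fresh zone onto the thread, and its destructor restores the previous one. A usage sketch built only from the calls visible in this diff (VM-internal types, so not runnable standalone):

    {
      StackZone stack_zone(thread);   // thread->zone() is now the new zone
      Zone* zone = thread->zone();
      char* scratch = zone->MakeCopyOfString("temp");
      // ... scratch is valid anywhere inside this scope ...
    }  // ~StackZone reinstates the previous zone; ~Zone releases the
       // segments and settles the thread's capacity/usage counters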
