Chromium Code Reviews

Unified Diff: runtime/vm/zone.cc

Issue 2762323002: Reimplemented zone memory tracking to avoid race conditions that were causing crashes in the previo… (Closed)
Patch Set: Final change Created 3 years, 9 months ago
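The description above refers to the accounting scheme this patch introduces: segment allocation and deallocation now go through static Segment::IncrementMemoryCapacity / DecrementMemoryCapacity helpers that charge the capacity to the current thread, or to the current ApiNativeScope when no thread is attached, so each counter is only ever written by its owning thread. A minimal sketch of that pattern outside the VM (the TrackedThread type and the current_thread pointer below are hypothetical, not part of this CL):

// Sketch only: per-thread capacity accounting. The real CL charges
// Thread::Current() or the current ApiNativeScope instead of this
// hypothetical TrackedThread.
#include <cstdint>

struct TrackedThread {
  uintptr_t zone_capacity = 0;  // zone segment bytes owned by this thread
  void IncrementMemoryCapacity(uintptr_t size) { zone_capacity += size; }
  void DecrementMemoryCapacity(uintptr_t size) { zone_capacity -= size; }
};

thread_local TrackedThread* current_thread = nullptr;

// Counterpart of Zone::Segment::IncrementMemoryCapacity: the counter being
// updated always belongs to the calling thread, so no write is shared
// between threads.
void IncrementMemoryCapacity(uintptr_t size) {
  if (current_thread != nullptr) {
    current_thread->IncrementMemoryCapacity(size);
  }
  // The VM additionally falls back to the enclosing native scope when no
  // thread is attached; omitted here.
}

void DecrementMemoryCapacity(uintptr_t size) {
  if (current_thread != nullptr) {
    current_thread->DecrementMemoryCapacity(size);
  }
}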
 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.

 #include "vm/zone.h"

 #include "platform/assert.h"
 #include "platform/utils.h"
 #include "vm/dart_api_state.h"
 #include "vm/flags.h"
(...skipping 10 matching lines...)
  public:
   Segment* next() const { return next_; }
   intptr_t size() const { return size_; }

   uword start() { return address(sizeof(Segment)); }
   uword end() { return address(size_); }

   // Allocate or delete individual segments.
   static Segment* New(intptr_t size, Segment* next);
   static void DeleteSegmentList(Segment* segment);
+  static void IncrementMemoryCapacity(uintptr_t size);
+  static void DecrementMemoryCapacity(uintptr_t size);

  private:
   Segment* next_;
   intptr_t size_;

   // Computes the address of the nth byte in this segment.
   uword address(int n) { return reinterpret_cast<uword>(this) + n; }

   static void Delete(Segment* segment) { free(segment); }

   DISALLOW_IMPLICIT_CONSTRUCTORS(Segment);
 };


+Zone::Segment* Zone::Segment::New(intptr_t size, Zone::Segment* next) {
+  ASSERT(size >= 0);
+  Segment* result = reinterpret_cast<Segment*>(malloc(size));
+  if (result == NULL) {
+    OUT_OF_MEMORY();
+  }
+  ASSERT(Utils::IsAligned(result->start(), Zone::kAlignment));
+#ifdef DEBUG
+  // Zap the entire allocated segment (including the header).
+  memset(result, kZapUninitializedByte, size);
+#endif
+  result->next_ = next;
+  result->size_ = size;
+  IncrementMemoryCapacity(size);
+  return result;
+}
+
+
 void Zone::Segment::DeleteSegmentList(Segment* head) {
   Segment* current = head;
-  Thread* current_thread = Thread::Current();
   while (current != NULL) {
-    if (current_thread != NULL) {
-      current_thread->DecrementMemoryUsage(current->size());
-    } else if (ApiNativeScope::Current() != NULL) {
-      // If there is no current thread, we might be inside of a native scope.
-      ApiNativeScope::DecrementNativeScopeMemoryUsage(current->size());
-    }
+    DecrementMemoryCapacity(current->size());
     Segment* next = current->next();
 #ifdef DEBUG
     // Zap the entire current segment (including the header).
     memset(current, kZapDeletedByte, current->size());
 #endif
     Segment::Delete(current);
     current = next;
   }
 }


-Zone::Segment* Zone::Segment::New(intptr_t size, Zone::Segment* next) {
-  ASSERT(size >= 0);
-  Segment* result = reinterpret_cast<Segment*>(malloc(size));
-  if (result == NULL) {
-    OUT_OF_MEMORY();
-  }
-  ASSERT(Utils::IsAligned(result->start(), Zone::kAlignment));
-#ifdef DEBUG
-  // Zap the entire allocated segment (including the header).
-  memset(result, kZapUninitializedByte, size);
-#endif
-  result->next_ = next;
-  result->size_ = size;
-  Thread* current = Thread::Current();
-  if (current != NULL) {
-    current->IncrementMemoryUsage(size);
+void Zone::Segment::IncrementMemoryCapacity(uintptr_t size) {
+  Thread* current_thread = Thread::Current();
+  if (current_thread != NULL) {
+    current_thread->IncrementMemoryCapacity(size);
   } else if (ApiNativeScope::Current() != NULL) {
     // If there is no current thread, we might be inside of a native scope.
-    ApiNativeScope::IncrementNativeScopeMemoryUsage(size);
+    ApiNativeScope::IncrementNativeScopeMemoryCapacity(size);
   }
-  return result;
 }

+
+void Zone::Segment::DecrementMemoryCapacity(uintptr_t size) {
+  Thread* current_thread = Thread::Current();
+  if (current_thread != NULL) {
+    current_thread->DecrementMemoryCapacity(size);
+  } else if (ApiNativeScope::Current() != NULL) {
+    // If there is no current thread, we might be inside of a native scope.
+    ApiNativeScope::DecrementNativeScopeMemoryCapacity(size);
+  }
+}
+
+
 // TODO(bkonyi): We need to account for the initial chunk size when a new zone
 // is created within a new thread or ApiNativeScope when calculating high
 // watermarks or memory consumption.
 Zone::Zone()
     : initial_buffer_(buffer_, kInitialChunkSize),
       position_(initial_buffer_.start()),
       limit_(initial_buffer_.end()),
       head_(NULL),
       large_segments_(NULL),
       handles_(),
       previous_(NULL) {
   ASSERT(Utils::IsAligned(position_, kAlignment));
-  Thread* current = Thread::Current();
-  if (current != NULL) {
-    current->IncrementMemoryUsage(kInitialChunkSize);
-  }
+  Segment::IncrementMemoryCapacity(kInitialChunkSize);
 #ifdef DEBUG
   // Zap the entire initial buffer.
   memset(initial_buffer_.pointer(), kZapUninitializedByte,
          initial_buffer_.size());
 #endif
 }


 Zone::~Zone() {
   if (FLAG_trace_zones) {
     DumpZoneSizes();
   }
-  Thread* current = Thread::Current();
-  if (current != NULL) {
-    current->DecrementMemoryUsage(kInitialChunkSize);
-  }
   DeleteAll();
+  Segment::DecrementMemoryCapacity(kInitialChunkSize);
 }


 void Zone::DeleteAll() {
   // Traverse the chained list of segments, zapping (in debug mode)
   // and freeing every zone segment.
   if (head_ != NULL) {
     Segment::DeleteSegmentList(head_);
   }
   if (large_segments_ != NULL) {
     Segment::DeleteSegmentList(large_segments_);
   }
   // Reset zone state.
 #ifdef DEBUG
   memset(initial_buffer_.pointer(), kZapDeletedByte, initial_buffer_.size());
 #endif
   position_ = initial_buffer_.start();
   limit_ = initial_buffer_.end();
   head_ = NULL;
   large_segments_ = NULL;
   previous_ = NULL;
   handles_.Reset();
 }


-intptr_t Zone::SizeInBytes() const {
-  intptr_t size = 0;
+uintptr_t Zone::SizeInBytes() const {
+  uintptr_t size = 0;
   for (Segment* s = large_segments_; s != NULL; s = s->next()) {
     size += s->size();
   }
   if (head_ == NULL) {
     return size + (position_ - initial_buffer_.start());
   }
   size += initial_buffer_.size();
   for (Segment* s = head_->next(); s != NULL; s = s->next()) {
     size += s->size();
   }
   return size + (position_ - head_->start());
 }


-intptr_t Zone::CapacityInBytes() const {
-  intptr_t size = 0;
+uintptr_t Zone::CapacityInBytes() const {
+  uintptr_t size = 0;
   for (Segment* s = large_segments_; s != NULL; s = s->next()) {
     size += s->size();
   }
   if (head_ == NULL) {
     return size + initial_buffer_.size();
   }
   size += initial_buffer_.size();
   for (Segment* s = head_; s != NULL; s = s->next()) {
     size += s->size();
(...skipping 118 matching lines...)
   va_end(args);
   return buffer;
 }


 char* Zone::VPrint(const char* format, va_list args) {
   return OS::VSCreate(this, format, args);
 }


-#ifndef PRODUCT
-// TODO(bkonyi): Currently dead code. See issue #28885.
-void Zone::PrintJSON(JSONStream* stream) const {
-  JSONObject jsobj(stream);
-  intptr_t capacity = CapacityInBytes();
-  intptr_t used_size = SizeInBytes();
-  jsobj.AddProperty("type", "_Zone");
-  jsobj.AddProperty("capacity", capacity);
-  jsobj.AddProperty("used", used_size);
-}
-#endif
-
-
 StackZone::StackZone(Thread* thread) : StackResource(thread), zone_() {
   if (FLAG_trace_zones) {
     OS::PrintErr("*** Starting a new Stack zone 0x%" Px "(0x%" Px ")\n",
                  reinterpret_cast<intptr_t>(this),
                  reinterpret_cast<intptr_t>(&zone_));
   }
   zone_.Link(thread->zone());
   thread->set_zone(&zone_);
 }


 StackZone::~StackZone() {
   ASSERT(thread()->zone() == &zone_);
   thread()->set_zone(zone_.previous_);
   if (FLAG_trace_zones) {
     OS::PrintErr("*** Deleting Stack zone 0x%" Px "(0x%" Px ")\n",
                  reinterpret_cast<intptr_t>(this),
                  reinterpret_cast<intptr_t>(&zone_));
   }
 }

 }  // namespace dart
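For context on the SizeInBytes/CapacityInBytes signature change above: SizeInBytes() reports the bytes actually handed out from the zone, while CapacityInBytes() reports everything reserved for it (the initial chunk plus every malloc'ed segment), which is the quantity the new capacity counters track. A rough usage sketch, not from this CL; the InspectZone helper is hypothetical, and StackZone::GetZone() plus Zone::Alloc<T>() are assumed to be the existing declarations in runtime/vm/zone.h:

// Rough sketch: observing used vs. reserved zone memory from VM code.
#include "vm/thread.h"
#include "vm/zone.h"

namespace dart {

void InspectZone(Thread* thread) {
  StackZone stack_zone(thread);      // links a fresh zone to the thread
  Zone* zone = stack_zone.GetZone();
  uintptr_t reserved_before = zone->CapacityInBytes();
  zone->Alloc<char>(1024);           // bump-allocates out of the zone
  // SizeInBytes() grows with every allocation; CapacityInBytes() only grows
  // when an allocation forces a new segment to be malloc'ed.
  uintptr_t used = zone->SizeInBytes();
  uintptr_t reserved_after = zone->CapacityInBytes();
  (void)reserved_before;
  (void)used;
  (void)reserved_after;
}

}  // namespace dart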
