OLD | NEW |
1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/heap.h" | 5 #include "vm/heap.h" |
6 | 6 |
7 #include "platform/assert.h" | 7 #include "platform/assert.h" |
8 #include "platform/utils.h" | 8 #include "platform/utils.h" |
9 #include "vm/flags.h" | 9 #include "vm/flags.h" |
10 #include "vm/isolate.h" | 10 #include "vm/isolate.h" |
(...skipping 81 matching lines...) |
92 return addr; | 92 return addr; |
93 } | 93 } |
94 | 94 |
95 | 95 |
96 uword Heap::AllocateOld(intptr_t size, HeapPage::PageType type) { | 96 uword Heap::AllocateOld(intptr_t size, HeapPage::PageType type) { |
97 ASSERT(Thread::Current()->no_safepoint_scope_depth() == 0); | 97 ASSERT(Thread::Current()->no_safepoint_scope_depth() == 0); |
98 uword addr = old_space_.TryAllocate(size, type); | 98 uword addr = old_space_.TryAllocate(size, type); |
99 if (addr != 0) { | 99 if (addr != 0) { |
100 return addr; | 100 return addr; |
101 } | 101 } |
102 // If we are in the process of running a sweep wait for the sweeper to free | 102 // If we are in the process of running a sweep, wait for the sweeper to free |
103 // memory. | 103 // memory. |
| 104 Thread* thread = Thread::Current(); |
104 { | 105 { |
105 MonitorLocker ml(old_space_.tasks_lock()); | 106 MonitorLocker ml(old_space_.tasks_lock()); |
106 addr = old_space_.TryAllocate(size, type); | 107 addr = old_space_.TryAllocate(size, type); |
107 while ((addr == 0) && (old_space_.tasks() > 0)) { | 108 while ((addr == 0) && (old_space_.tasks() > 0)) { |
108 ml.Wait(); | 109 ml.WaitWithSafepointCheck(thread); |
109 addr = old_space_.TryAllocate(size, type); | 110 addr = old_space_.TryAllocate(size, type); |
110 } | 111 } |
111 } | 112 } |
112 if (addr != 0) { | 113 if (addr != 0) { |
113 return addr; | 114 return addr; |
114 } | 115 } |
115 Thread* thread = Thread::Current(); | |
116 if (thread->CanCollectGarbage()) { | 116 if (thread->CanCollectGarbage()) { |
117 // All GC tasks finished without allocating successfully. Run a full GC. | 117 // All GC tasks finished without allocating successfully. Run a full GC. |
118 CollectAllGarbage(); | 118 CollectAllGarbage(); |
119 addr = old_space_.TryAllocate(size, type); | 119 addr = old_space_.TryAllocate(size, type); |
120 if (addr != 0) { | 120 if (addr != 0) { |
121 return addr; | 121 return addr; |
122 } | 122 } |
123 // Wait for all of the concurrent tasks to finish before giving up. | 123 // Wait for all of the concurrent tasks to finish before giving up. |
124 { | 124 { |
125 MonitorLocker ml(old_space_.tasks_lock()); | 125 MonitorLocker ml(old_space_.tasks_lock()); |
126 addr = old_space_.TryAllocate(size, type); | 126 addr = old_space_.TryAllocate(size, type); |
127 while ((addr == 0) && (old_space_.tasks() > 0)) { | 127 while ((addr == 0) && (old_space_.tasks() > 0)) { |
128 ml.Wait(); | 128 ml.WaitWithSafepointCheck(thread); |
129 addr = old_space_.TryAllocate(size, type); | 129 addr = old_space_.TryAllocate(size, type); |
130 } | 130 } |
131 } | 131 } |
132 if (addr != 0) { | 132 if (addr != 0) { |
133 return addr; | 133 return addr; |
134 } | 134 } |
135 // Force growth before attempting another synchronous GC. | 135 // Force growth before attempting another synchronous GC. |
136 addr = old_space_.TryAllocate(size, type, PageSpace::kForceGrowth); | 136 addr = old_space_.TryAllocate(size, type, PageSpace::kForceGrowth); |
137 if (addr != 0) { | 137 if (addr != 0) { |
138 return addr; | 138 return addr; |
139 } | 139 } |
140 // Before throwing an out-of-memory error try a synchronous GC. | 140 // Before throwing an out-of-memory error try a synchronous GC. |
141 CollectAllGarbage(); | 141 CollectAllGarbage(); |
142 { | 142 { |
143 MonitorLocker ml(old_space_.tasks_lock()); | 143 MonitorLocker ml(old_space_.tasks_lock()); |
144 while (old_space_.tasks() > 0) { | 144 while (old_space_.tasks() > 0) { |
145 ml.Wait(); | 145 ml.WaitWithSafepointCheck(thread); |
146 } | 146 } |
147 } | 147 } |
148 } | 148 } |
149 addr = old_space_.TryAllocate(size, type, PageSpace::kForceGrowth); | 149 addr = old_space_.TryAllocate(size, type, PageSpace::kForceGrowth); |
150 if (addr != 0) { | 150 if (addr != 0) { |
151 return addr; | 151 return addr; |
152 } | 152 } |
153 // Give up allocating this object. | 153 // Give up allocating this object. |
154 OS::PrintErr( | 154 OS::PrintErr( |
155 "Exhausted heap space, trying to allocate %" Pd " bytes.\n", size); | 155 "Exhausted heap space, trying to allocate %" Pd " bytes.\n", size); |
(...skipping 151 matching lines...) |
307 } | 307 } |
308 raw_obj = FindOldObject(visitor); | 308 raw_obj = FindOldObject(visitor); |
309 if (raw_obj != Object::null()) { | 309 if (raw_obj != Object::null()) { |
310 return raw_obj; | 310 return raw_obj; |
311 } | 311 } |
312 raw_obj = FindObjectInCodeSpace(visitor); | 312 raw_obj = FindObjectInCodeSpace(visitor); |
313 return raw_obj; | 313 return raw_obj; |
314 } | 314 } |
315 | 315 |
316 | 316 |
317 bool Heap::BeginNewSpaceGC() { | 317 bool Heap::BeginNewSpaceGC(Thread* thread) { |
318 MonitorLocker ml(&gc_in_progress_monitor_); | 318 MonitorLocker ml(&gc_in_progress_monitor_); |
319 bool start_gc_on_thread = true; | 319 bool start_gc_on_thread = true; |
320 while (gc_new_space_in_progress_ || | 320 while (gc_new_space_in_progress_ || |
321 gc_old_space_in_progress_) { | 321 gc_old_space_in_progress_) { |
322 start_gc_on_thread = !gc_new_space_in_progress_; | 322 start_gc_on_thread = !gc_new_space_in_progress_; |
323 ml.Wait(); | 323 ml.WaitWithSafepointCheck(thread); |
324 } | 324 } |
325 if (start_gc_on_thread) { | 325 if (start_gc_on_thread) { |
326 gc_new_space_in_progress_ = true; | 326 gc_new_space_in_progress_ = true; |
327 return true; | 327 return true; |
328 } | 328 } |
329 return false; | 329 return false; |
330 } | 330 } |
331 | 331 |
332 | 332 |
333 void Heap::EndNewSpaceGC() { | 333 void Heap::EndNewSpaceGC() { |
334 MonitorLocker ml(&gc_in_progress_monitor_); | 334 MonitorLocker ml(&gc_in_progress_monitor_); |
335 ASSERT(gc_new_space_in_progress_); | 335 ASSERT(gc_new_space_in_progress_); |
336 gc_new_space_in_progress_ = false; | 336 gc_new_space_in_progress_ = false; |
337 ml.NotifyAll(); | 337 ml.NotifyAll(); |
338 } | 338 } |
339 | 339 |
340 | 340 |
341 bool Heap::BeginOldSpaceGC() { | 341 bool Heap::BeginOldSpaceGC(Thread* thread) { |
342 MonitorLocker ml(&gc_in_progress_monitor_); | 342 MonitorLocker ml(&gc_in_progress_monitor_); |
343 bool start_gc_on_thread = true; | 343 bool start_gc_on_thread = true; |
344 while (gc_new_space_in_progress_ || | 344 while (gc_new_space_in_progress_ || |
345 gc_old_space_in_progress_) { | 345 gc_old_space_in_progress_) { |
346 start_gc_on_thread = !gc_old_space_in_progress_; | 346 start_gc_on_thread = !gc_old_space_in_progress_; |
347 ml.Wait(); | 347 ml.WaitWithSafepointCheck(thread); |
348 } | 348 } |
349 if (start_gc_on_thread) { | 349 if (start_gc_on_thread) { |
350 gc_old_space_in_progress_ = true; | 350 gc_old_space_in_progress_ = true; |
351 return true; | 351 return true; |
352 } | 352 } |
353 return false; | 353 return false; |
354 } | 354 } |
355 | 355 |
356 | 356 |
357 void Heap::EndOldSpaceGC() { | 357 void Heap::EndOldSpaceGC() { |
(...skipping 10 matching lines...) |
368 class_table->ResetCountersNew(); | 368 class_table->ResetCountersNew(); |
369 } else { | 369 } else { |
370 class_table->ResetCountersOld(); | 370 class_table->ResetCountersOld(); |
371 } | 371 } |
372 } | 372 } |
373 | 373 |
374 | 374 |
375 void Heap::CollectNewSpaceGarbage(Thread* thread, | 375 void Heap::CollectNewSpaceGarbage(Thread* thread, |
376 ApiCallbacks api_callbacks, | 376 ApiCallbacks api_callbacks, |
377 GCReason reason) { | 377 GCReason reason) { |
378 if (BeginNewSpaceGC()) { | 378 if (BeginNewSpaceGC(thread)) { |
379 bool invoke_api_callbacks = (api_callbacks == kInvokeApiCallbacks); | 379 bool invoke_api_callbacks = (api_callbacks == kInvokeApiCallbacks); |
380 RecordBeforeGC(kNew, reason); | 380 RecordBeforeGC(kNew, reason); |
381 VMTagScope tagScope(thread, VMTag::kGCNewSpaceTagId); | 381 VMTagScope tagScope(thread, VMTag::kGCNewSpaceTagId); |
382 TimelineDurationScope tds(thread, | 382 TimelineDurationScope tds(thread, |
383 isolate()->GetGCStream(), | 383 isolate()->GetGCStream(), |
384 "CollectNewGeneration"); | 384 "CollectNewGeneration"); |
385 UpdateClassHeapStatsBeforeGC(kNew); | 385 UpdateClassHeapStatsBeforeGC(kNew); |
386 new_space_.Scavenge(invoke_api_callbacks); | 386 new_space_.Scavenge(invoke_api_callbacks); |
387 isolate()->class_table()->UpdatePromoted(); | 387 isolate()->class_table()->UpdatePromoted(); |
388 UpdatePretenurePolicy(); | 388 UpdatePretenurePolicy(); |
389 RecordAfterGC(kNew); | 389 RecordAfterGC(kNew); |
390 PrintStats(); | 390 PrintStats(); |
391 EndNewSpaceGC(); | 391 EndNewSpaceGC(); |
392 if (old_space_.NeedsGarbageCollection()) { | 392 if (old_space_.NeedsGarbageCollection()) { |
393 // Old collections should call the API callbacks. | 393 // Old collections should call the API callbacks. |
394 CollectOldSpaceGarbage(thread, kInvokeApiCallbacks, kPromotion); | 394 CollectOldSpaceGarbage(thread, kInvokeApiCallbacks, kPromotion); |
395 } | 395 } |
396 } | 396 } |
397 } | 397 } |
398 | 398 |
399 | 399 |
400 void Heap::CollectOldSpaceGarbage(Thread* thread, | 400 void Heap::CollectOldSpaceGarbage(Thread* thread, |
401 ApiCallbacks api_callbacks, | 401 ApiCallbacks api_callbacks, |
402 GCReason reason) { | 402 GCReason reason) { |
403 if (BeginOldSpaceGC()) { | 403 if (BeginOldSpaceGC(thread)) { |
404 bool invoke_api_callbacks = (api_callbacks == kInvokeApiCallbacks); | 404 bool invoke_api_callbacks = (api_callbacks == kInvokeApiCallbacks); |
405 RecordBeforeGC(kOld, reason); | 405 RecordBeforeGC(kOld, reason); |
406 VMTagScope tagScope(thread, VMTag::kGCOldSpaceTagId); | 406 VMTagScope tagScope(thread, VMTag::kGCOldSpaceTagId); |
407 TimelineDurationScope tds(thread, | 407 TimelineDurationScope tds(thread, |
408 isolate()->GetGCStream(), | 408 isolate()->GetGCStream(), |
409 "CollectOldGeneration"); | 409 "CollectOldGeneration"); |
410 UpdateClassHeapStatsBeforeGC(kOld); | 410 UpdateClassHeapStatsBeforeGC(kOld); |
411 old_space_.MarkSweep(invoke_api_callbacks); | 411 old_space_.MarkSweep(invoke_api_callbacks); |
412 RecordAfterGC(kOld); | 412 RecordAfterGC(kOld); |
413 PrintStats(); | 413 PrintStats(); |
(...skipping 430 matching lines...) |
844 Dart::vm_isolate()->heap()->WriteProtect(false, include_code_pages_); | 844 Dart::vm_isolate()->heap()->WriteProtect(false, include_code_pages_); |
845 } | 845 } |
846 | 846 |
847 | 847 |
848 WritableVMIsolateScope::~WritableVMIsolateScope() { | 848 WritableVMIsolateScope::~WritableVMIsolateScope() { |
849 ASSERT(Dart::vm_isolate()->heap()->UsedInWords(Heap::kNew) == 0); | 849 ASSERT(Dart::vm_isolate()->heap()->UsedInWords(Heap::kNew) == 0); |
850 Dart::vm_isolate()->heap()->WriteProtect(true, include_code_pages_); | 850 Dart::vm_isolate()->heap()->WriteProtect(true, include_code_pages_); |
851 } | 851 } |
852 | 852 |
853 } // namespace dart | 853 } // namespace dart |
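[Note] BeginNewSpaceGC/BeginOldSpaceGC implement a run-or-wait handshake: a caller either becomes the thread that performs the collection, or waits (now safepoint-aware, via the new Thread* parameter) and learns on waking that another thread already collected on its behalf. A condensed, hypothetical single-space analogue of that handshake, using std::condition_variable in place of the VM's Monitor; the real code tracks new-space and old-space flags separately:

    #include <condition_variable>
    #include <mutex>

    class GcGate {
     public:
      // Returns true iff the caller should run the GC itself.
      bool Begin() {
        std::unique_lock<std::mutex> lock(mutex_);
        bool start_gc_on_thread = true;
        while (in_progress_) {
          // Another thread is collecting; its GC satisfies our request,
          // so do not start a redundant one after it finishes.
          start_gc_on_thread = false;
          cv_.wait(lock);
        }
        if (start_gc_on_thread) {
          in_progress_ = true;
          return true;
        }
        return false;
      }

      void End() {
        std::lock_guard<std::mutex> lock(mutex_);
        in_progress_ = false;
        cv_.notify_all();  // wake all waiters, as EndNewSpaceGC does
      }

     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      bool in_progress_ = false;
    };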