Chromium Code Reviews

Diff: runtime/vm/scavenger.cc

Issue 320463003: Make unused semispace available to other isolates. (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 6 years, 6 months ago
 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.
 
 #include "vm/scavenger.h"
 
 #include <algorithm>
 #include <map>
 #include <utility>
 
 #include "vm/dart.h"
 #include "vm/dart_api_state.h"
 #include "vm/isolate.h"
 #include "vm/object.h"
 #include "vm/stack_frame.h"
 #include "vm/store_buffer.h"
+#include "vm/thread.h"
 #include "vm/verifier.h"
 #include "vm/visitor.h"
 #include "vm/weak_table.h"
 #include "vm/object_id_ring.h"
 
 namespace dart {
 
 DEFINE_FLAG(int, early_tenuring_threshold, 66, "Skip TO space when promoting"
             " above this percentage.");
 
(... skipping 251 matching lines ...)
   bool prologue_weak_were_strong_;
 
   DISALLOW_COPY_AND_ASSIGN(ScavengerWeakVisitor);
 };
 
 
 // Visitor used to verify that all old->new references have been added to the
 // StoreBuffers.
 class VerifyStoreBufferPointerVisitor : public ObjectPointerVisitor {
  public:
-  VerifyStoreBufferPointerVisitor(Isolate* isolate, MemoryRegion* to)
+  VerifyStoreBufferPointerVisitor(Isolate* isolate,
+                                  const SemiSpace* to)
       : ObjectPointerVisitor(isolate), to_(to) {}
 
   void VisitPointers(RawObject** first, RawObject** last) {
     for (RawObject** current = first; current <= last; current++) {
       RawObject* obj = *current;
       if (obj->IsHeapObject() && obj->IsNewObject()) {
         ASSERT(to_->Contains(RawObject::ToAddr(obj)));
       }
     }
   }
 
  private:
-  MemoryRegion* to_;
+  const SemiSpace* to_;
 
   DISALLOW_COPY_AND_ASSIGN(VerifyStoreBufferPointerVisitor);
 };
 
 
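The new type of to_ is SemiSpace, declared in runtime/vm/scavenger.h (also part of this CL, but not shown on this page). As a reading aid, here is a hedged reconstruction of its interface, inferred only from the uses in this file; the actual declaration may differ:

    // Reconstructed sketch, not the real header; every member below is
    // inferred from how scavenger.cc uses the class.
    class SemiSpace {
     public:
      static void InitOnce();                // creates the mutex guarding the cache
      static SemiSpace* New(intptr_t size);  // reuses the cached space when sizes match
      void Delete();                         // parks this space in the one-deep cache

      uword start() const { return region_.start(); }
      uword end() const { return region_.end(); }
      intptr_t size() const { return region_.size(); }
      bool Contains(uword addr) const { return region_.Contains(addr); }
      void WriteProtect(bool read_only);

     private:
      explicit SemiSpace(VirtualMemory* reserved);  // reserved may be NULL
      ~SemiSpace();

      VirtualMemory* reserved_;  // NULL for a zero-size space
      MemoryRegion region_;

      static Mutex* mutex_;      // guards cache_
      static SemiSpace* cache_;  // at most one unused space, shared across isolates
    };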
+SemiSpace::SemiSpace(VirtualMemory* reserved)
+    : reserved_(reserved), region_(NULL, 0) {
+  if (reserved != NULL) {
+    region_ = MemoryRegion(reserved_->address(), reserved_->size());
+  }
+}
+
+
+SemiSpace::~SemiSpace() {
+  if (reserved_ != NULL) {
+#if defined(DEBUG)
+    memset(reserved_->address(), 0xf3, size());
+#endif  // defined(DEBUG)
+    delete reserved_;
+  }
+}
+
+
+Mutex* SemiSpace::mutex_ = NULL;
+SemiSpace* SemiSpace::cache_ = NULL;
+
+
+void SemiSpace::InitOnce() {
+  ASSERT(mutex_ == NULL);
+  mutex_ = new Mutex();
+  ASSERT(mutex_ != NULL);
+}
+
+
+SemiSpace* SemiSpace::New(intptr_t size) {
+  {
+    MutexLocker locker(mutex_);
+    if (cache_ != NULL && cache_->size() == size) {
+      SemiSpace* result = cache_;
+      cache_ = NULL;
+      return result;
+    }
+  }
+  if (size == 0) {
+    return new SemiSpace(NULL);
+  } else {
+    VirtualMemory* reserved = VirtualMemory::Reserve(size);
+    if (reserved == NULL || !reserved->Commit(VirtualMemory::kReadWrite)) {

      Ivan Posva 2014/06/05 22:27:28: (reserved == NULL)
      koda 2014/06/05 23:04:23: Done.

+      // TODO(koda): If cache_ is not empty, we could try to delete it.

      Ivan Posva 2014/06/05 22:27:28: If Commit failed you probably do not want to leak
      koda 2014/06/05 23:04:23: Done.

+      return NULL;
+    }
+#if defined(DEBUG)
+    memset(reserved->address(), 0xf3, size);
+#endif  // defined(DEBUG)
+    return new SemiSpace(reserved);
+  }
+}
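koda replied "Done." to both comments, but the follow-up patch set is not shown on this page. A minimal sketch of the requested shape — splitting the NULL check from the Commit check and freeing the reservation on Commit failure — would be (hypothetical; the actual fix may differ):

    VirtualMemory* reserved = VirtualMemory::Reserve(size);
    if (reserved == NULL) {
      return NULL;
    }
    if (!reserved->Commit(VirtualMemory::kReadWrite)) {
      delete reserved;  // don't leak the reservation when Commit fails
      return NULL;
    }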
+
+
+void SemiSpace::Delete() {
+  SemiSpace* old_cache = NULL;
+  {
+    MutexLocker locker(mutex_);
+    old_cache = cache_;
+    cache_ = this;
+  }
+  delete old_cache;
+}
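Note that Delete() never unmaps its space immediately: under the lock it swaps itself into the one-entry cache, then frees whatever was cached before, outside the lock. This is what makes an unused semispace available to other isolates. An illustrative sequence (hypothetical sizes and call sites):

    SemiSpace* a = SemiSpace::New(512 * 1024);  // cache miss: fresh reservation
    a->Delete();                                // 'a' becomes the cached space
    SemiSpace* b = SemiSpace::New(512 * 1024);  // cache hit: 'a' is handed back
    b->Delete();                                // 'b' is parked in the empty cache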
+
+
+void SemiSpace::WriteProtect(bool read_only) {
+  if (reserved_ != NULL) {
+    bool success = reserved_->Protect(
+        read_only ? VirtualMemory::kReadOnly : VirtualMemory::kReadWrite);
+    ASSERT(success);
+  }
+}
+
+
 Scavenger::Scavenger(Heap* heap,
                      intptr_t max_capacity_in_words,
                      uword object_alignment)
     : heap_(heap),
       object_alignment_(object_alignment),
       scavenging_(false),
       gc_time_micros_(0),
       collections_(0),
       external_size_(0) {
   // Verify assumptions about the first word in objects which the scavenger is
   // going to use for forwarding pointers.
   ASSERT(Object::tags_offset() == 0);
 
-  if (max_capacity_in_words == 0) {
-    space_ = NULL;
-    to_ = new MemoryRegion(NULL, 0);
-    from_ = new MemoryRegion(NULL, 0);
-  } else {
-    // Allocate the virtual memory for this scavenge heap.
-    space_ = VirtualMemory::Reserve(max_capacity_in_words << kWordSizeLog2);
-    if (space_ == NULL) {
-      FATAL("Out of memory.\n");
-    }
-
-    // Allocate the entire space at the beginning.
-    space_->Commit(false);
-
-    // Setup the semi spaces.
-    uword semi_space_size = space_->size() / 2;
-    ASSERT((semi_space_size & (VirtualMemory::PageSize() - 1)) == 0);
-    to_ = new MemoryRegion(space_->address(), semi_space_size);
-    uword middle = space_->start() + semi_space_size;
-    from_ = new MemoryRegion(reinterpret_cast<void*>(middle), semi_space_size);
-  }
-
-  // Make sure that the two semi-spaces are aligned properly.
-  ASSERT(Utils::IsAligned(to_->start(), kObjectAlignment));
-  ASSERT(Utils::IsAligned(from_->start(), kObjectAlignment));
+  const intptr_t semi_space_size = (max_capacity_in_words / 2) * kWordSize;
+  to_ = SemiSpace::New(semi_space_size);
+  if (to_ == NULL) {
+    FATAL("Out of memory.\n");
+  }
+  from_ = NULL;
 
   // Setup local fields.
   top_ = FirstObjectStart();
   resolved_top_ = top_;
   end_ = to_->end();
 
   survivor_end_ = FirstObjectStart();
-
-#if defined(DEBUG)
-  memset(to_->pointer(), 0xf3, to_->size());
-  memset(from_->pointer(), 0xf3, from_->size());
-#endif  // defined(DEBUG)
 }
 
 
 Scavenger::~Scavenger() {
-  delete to_;
-  delete from_;
-  delete space_;
+  from_->Delete();

      Ivan Posva 2014/06/05 22:27:28: ASSERT(from_ == NULL)
      koda 2014/06/05 23:04:23: Done.

+  to_->Delete();
 }
 
 
 void Scavenger::Prologue(Isolate* isolate, bool invoke_api_callbacks) {
   if (invoke_api_callbacks && (isolate->gc_prologue_callback() != NULL)) {
     (isolate->gc_prologue_callback())();
   }
   // Flip the two semi-spaces so that to_ is always the space for allocating
   // objects.
-  MemoryRegion* temp = from_;
   from_ = to_;
-  to_ = temp;
+  to_ = SemiSpace::New(from_->size());
+  if (to_ == NULL) {
+    // TODO(koda): We could try to recover (collect old space, wait for another
+    // isolate to finish scavenge, etc.).
+    FATAL("Out of memory.\n");
+  }
   top_ = FirstObjectStart();
   resolved_top_ = top_;
   end_ = to_->end();
 }
 
 
 void Scavenger::Epilogue(Isolate* isolate,
                          ScavengerVisitor* visitor,
                          bool invoke_api_callbacks) {
   // All objects in the to space have been copied from the from space at this
   // moment.
   int promotion_ratio = static_cast<int>(
       (static_cast<double>(visitor->bytes_promoted()) /
        static_cast<double>(to_->size())) * 100.0);
   if (promotion_ratio < FLAG_early_tenuring_threshold) {
     // Remember the limit to which objects have been copied.
     survivor_end_ = top_;
   } else {
     // Move survivor end to the end of the to_ space, making all surviving
     // objects candidates for promotion.
     survivor_end_ = end_;
   }
 
 #if defined(DEBUG)
   VerifyStoreBufferPointerVisitor verify_store_buffer_visitor(isolate, to_);
   heap_->IterateOldPointers(&verify_store_buffer_visitor);
-
-  memset(from_->pointer(), 0xf3, from_->size());
 #endif  // defined(DEBUG)
+  from_->Delete();
+  from_ = NULL;
   if (invoke_api_callbacks && (isolate->gc_epilogue_callback() != NULL)) {
     (isolate->gc_epilogue_callback())();
   }
 }
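Prologue and Epilogue together implement the new semispace lifecycle that gives this CL its title: between scavenges an isolate holds only its to-space, and the retired from-space goes back into the shared cache where any isolate's next scavenge can claim it. Schematically (an illustration, not code from the patch):

    // Between GCs:  from_ == NULL; only to_ is held.
    // Prologue:     from_ = to_;                          // flip
    //               to_ = SemiSpace::New(from_->size());  // fresh or cached
    // During GC:    live objects are copied from from_ into to_.
    // Epilogue:     from_->Delete();                      // park in shared cache
    //               from_ = NULL;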
 
 
 void Scavenger::IterateStoreBuffers(Isolate* isolate,
                                     ScavengerVisitor* visitor) {
   StoreBuffer* buffer = isolate->store_buffer();
   heap_->RecordData(kStoreBufferEntries, buffer->Count());

(... skipping 317 matching lines ...)
     OS::PrintErr(" done.\n");
   }
 
   // Done scavenging. Reset the marker.
   ASSERT(scavenging_);
   scavenging_ = false;
 }
 
 
 void Scavenger::WriteProtect(bool read_only) {
-  if (space_ != NULL) {
-    space_->Protect(
-        read_only ? VirtualMemory::kReadOnly : VirtualMemory::kReadWrite);
-  }
+  ASSERT(!scavenging_);
+  ASSERT(from_ == NULL);
+  to_->WriteProtect(read_only);
 }
 
 
 void Scavenger::PrintToJSONObject(JSONObject* object) {
   JSONObject space(object, "new");
   space.AddProperty("type", "@Scavenger");
   space.AddProperty("id", "heaps/new");
   space.AddProperty("name", "Scavenger");
   space.AddProperty("user_name", "new");
   space.AddProperty("collections", collections());

(... skipping 10 matching lines ...)
 }
 
 
 void Scavenger::FreeExternal(intptr_t size) {
   ASSERT(size >= 0);
   external_size_ -= size;
   ASSERT(external_size_ >= 0);
 }
 
 }  // namespace dart
