| OLD | NEW |
| 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/pages.h" | 5 #include "vm/pages.h" |
| 6 | 6 |
| 7 #include "platform/assert.h" | 7 #include "platform/assert.h" |
| 8 #include "vm/compiler_stats.h" | 8 #include "vm/compiler_stats.h" |
| 9 #include "vm/gc_marker.h" | 9 #include "vm/gc_marker.h" |
| 10 #include "vm/gc_sweeper.h" | 10 #include "vm/gc_sweeper.h" |
| 11 #include "vm/lockers.h" | 11 #include "vm/lockers.h" |
| 12 #include "vm/object.h" | 12 #include "vm/object.h" |
| 13 #include "vm/object_set.h" | 13 #include "vm/object_set.h" |
| 14 #include "vm/os_thread.h" | 14 #include "vm/os_thread.h" |
| 15 #include "vm/safepoint.h" | 15 #include "vm/safepoint.h" |
| 16 #include "vm/virtual_memory.h" | 16 #include "vm/virtual_memory.h" |
| 17 | 17 |
| 18 namespace dart { | 18 namespace dart { |
| 19 | 19 |
| 20 DEFINE_FLAG(int, heap_growth_rate, 0, | 20 DEFINE_FLAG(int, |
| | 21 heap_growth_rate, |
| | 22 0, |
| 21 "The max number of pages the heap can grow at a time"); | 23 "The max number of pages the heap can grow at a time"); |
| 22 DEFINE_FLAG(int, old_gen_growth_space_ratio, 20, | 24 DEFINE_FLAG(int, |
| | 25 old_gen_growth_space_ratio, |
| | 26 20, |
| 23 "The desired maximum percentage of free space after old gen GC"); | 27 "The desired maximum percentage of free space after old gen GC"); |
| 24 DEFINE_FLAG(int, old_gen_growth_time_ratio, 3, | 28 DEFINE_FLAG(int, |
| | 29 old_gen_growth_time_ratio, |
| | 30 3, |
| 25 "The desired maximum percentage of time spent in old gen GC"); | 31 "The desired maximum percentage of time spent in old gen GC"); |
| 26 DEFINE_FLAG(int, old_gen_growth_rate, 280, | 32 DEFINE_FLAG(int, |
| | 33 old_gen_growth_rate, |
| | 34 280, |
| 27 "The max number of pages the old generation can grow at a time"); | 35 "The max number of pages the old generation can grow at a time"); |
| 28 DEFINE_FLAG(bool, print_free_list_before_gc, false, | 36 DEFINE_FLAG(bool, |
| | 37 print_free_list_before_gc, |
| | 38 false, |
| 29 "Print free list statistics before a GC"); | 39 "Print free list statistics before a GC"); |
| 30 DEFINE_FLAG(bool, print_free_list_after_gc, false, | 40 DEFINE_FLAG(bool, |
| | 41 print_free_list_after_gc, |
| | 42 false, |
| 31 "Print free list statistics after a GC"); | 43 "Print free list statistics after a GC"); |
| 32 DEFINE_FLAG(int, code_collection_interval_in_us, 30000000, | 44 DEFINE_FLAG(int, |
| | 45 code_collection_interval_in_us, |
| | 46 30000000, |
| 33 "Time between attempts to collect unused code."); | 47 "Time between attempts to collect unused code."); |
| 34 DEFINE_FLAG(bool, log_code_drop, false, | 48 DEFINE_FLAG(bool, |
| | 49 log_code_drop, |
| | 50 false, |
| 35 "Emit a log message when pointers to unused code are dropped."); | 51 "Emit a log message when pointers to unused code are dropped."); |
| 36 DEFINE_FLAG(bool, always_drop_code, false, | 52 DEFINE_FLAG(bool, |
| | 53 always_drop_code, |
| | 54 false, |
| 37 "Always try to drop code if the function's usage counter is >= 0"); | 55 "Always try to drop code if the function's usage counter is >= 0"); |
| 38 DEFINE_FLAG(bool, log_growth, false, "Log PageSpace growth policy decisions."); | 56 DEFINE_FLAG(bool, log_growth, false, "Log PageSpace growth policy decisions."); |
| 39 | 57 |
| 40 HeapPage* HeapPage::Initialize(VirtualMemory* memory, PageType type) { | 58 HeapPage* HeapPage::Initialize(VirtualMemory* memory, PageType type) { |
| 41 ASSERT(memory != NULL); | 59 ASSERT(memory != NULL); |
| 42 ASSERT(memory->size() > VirtualMemory::PageSize()); | 60 ASSERT(memory->size() > VirtualMemory::PageSize()); |
| 43 bool is_executable = (type == kExecutable); | 61 bool is_executable = (type == kExecutable); |
| 44 // Create the new page executable (RWX) only if we're not in W^X mode | 62 // Create the new page executable (RWX) only if we're not in W^X mode |
| 45 bool create_executable = !FLAG_write_protect_code && is_executable; | 63 bool create_executable = !FLAG_write_protect_code && is_executable; |
| 46 if (!memory->Commit(create_executable)) { | 64 if (!memory->Commit(create_executable)) { |
| (...skipping 351 matching lines...) |
| 398 AtomicOperations::IncrementBy(&(usage_.used_in_words), | 416 AtomicOperations::IncrementBy(&(usage_.used_in_words), |
| 399 (size >> kWordSizeLog2)); | 417 (size >> kWordSizeLog2)); |
| 400 } | 418 } |
| 401 } | 419 } |
| 402 } | 420 } |
| 403 #ifdef DEBUG | 421 #ifdef DEBUG |
| 404 if (result != 0) { | 422 if (result != 0) { |
| 405 // A successful allocation should increase usage_. | 423 // A successful allocation should increase usage_. |
| 406 ASSERT(usage_before.used_in_words < usage_.used_in_words); | 424 ASSERT(usage_before.used_in_words < usage_.used_in_words); |
| 407 } | 425 } |
| 408 // Note we cannot assert that a failed allocation should not change | 426 // Note we cannot assert that a failed allocation should not change |
| 409 // used_in_words as another thread could have changed used_in_words. | 427 // used_in_words as another thread could have changed used_in_words. |
| 410 #endif | 428 #endif |
| 411 ASSERT((result & kObjectAlignmentMask) == kOldObjectAlignmentOffset); | 429 ASSERT((result & kObjectAlignmentMask) == kOldObjectAlignmentOffset); |
| 412 return result; | 430 return result; |
| 413 } | 431 } |
| 414 | 432 |
| 415 | 433 |
| 416 void PageSpace::AcquireDataLock() { | 434 void PageSpace::AcquireDataLock() { |
| 417 freelist_[HeapPage::kData].mutex()->Lock(); | 435 freelist_[HeapPage::kData].mutex()->Lock(); |
| 418 } | 436 } |
| 419 | 437 |
| 420 | 438 |
| 421 void PageSpace::ReleaseDataLock() { | 439 void PageSpace::ReleaseDataLock() { |
| 422 freelist_[HeapPage::kData].mutex()->Unlock(); | 440 freelist_[HeapPage::kData].mutex()->Unlock(); |
| 423 } | 441 } |
| 424 | 442 |
| 425 | 443 |
| 426 void PageSpace::AllocateExternal(intptr_t size) { | 444 void PageSpace::AllocateExternal(intptr_t size) { |
| 427 intptr_t size_in_words = size >> kWordSizeLog2; | 445 intptr_t size_in_words = size >> kWordSizeLog2; |
| 428 AtomicOperations::IncrementBy(&(usage_.external_in_words), size_in_words); | 446 AtomicOperations::IncrementBy(&(usage_.external_in_words), size_in_words); |
| 429 // TODO(koda): Control growth. | 447 // TODO(koda): Control growth. |
| 430 } | 448 } |
| 431 | 449 |
| 432 | 450 |
| 433 void PageSpace::FreeExternal(intptr_t size) { | 451 void PageSpace::FreeExternal(intptr_t size) { |
| (...skipping 15 matching lines...) |
| 449 page_ = space_->large_pages_; | 467 page_ = space_->large_pages_; |
| 450 } | 468 } |
| 451 } | 469 } |
| 452 } | 470 } |
| 453 HeapPage* page() const { return page_; } | 471 HeapPage* page() const { return page_; } |
| 454 bool Done() const { return page_ == NULL; } | 472 bool Done() const { return page_ == NULL; } |
| 455 void Advance() { | 473 void Advance() { |
| 456 ASSERT(!Done()); | 474 ASSERT(!Done()); |
| 457 page_ = space_->NextPageAnySize(page_); | 475 page_ = space_->NextPageAnySize(page_); |
| 458 } | 476 } |
| | 477 |
| 459 private: | 478 private: |
| 460 const PageSpace* space_; | 479 const PageSpace* space_; |
| 461 MutexLocker ml_; | 480 MutexLocker ml_; |
| 462 NoSafepointScope no_safepoint; | 481 NoSafepointScope no_safepoint; |
| 463 HeapPage* page_; | 482 HeapPage* page_; |
| 464 }; | 483 }; |
| 465 | 484 |
| 466 | 485 |
| 467 // Provides exclusive access to code pages, and ensures they are walkable. | 486 // Provides exclusive access to code pages, and ensures they are walkable. |
| 468 // NOTE: This does not iterate over large pages which can contain code. | 487 // NOTE: This does not iterate over large pages which can contain code. |
| 469 class ExclusiveCodePageIterator : ValueObject { | 488 class ExclusiveCodePageIterator : ValueObject { |
| 470 public: | 489 public: |
| 471 explicit ExclusiveCodePageIterator(const PageSpace* space) | 490 explicit ExclusiveCodePageIterator(const PageSpace* space) |
| 472 : space_(space), ml_(space->pages_lock_) { | 491 : space_(space), ml_(space->pages_lock_) { |
| 473 space_->MakeIterable(); | 492 space_->MakeIterable(); |
| 474 page_ = space_->exec_pages_; | 493 page_ = space_->exec_pages_; |
| 475 } | 494 } |
| 476 HeapPage* page() const { return page_; } | 495 HeapPage* page() const { return page_; } |
| 477 bool Done() const { return page_ == NULL; } | 496 bool Done() const { return page_ == NULL; } |
| 478 void Advance() { | 497 void Advance() { |
| 479 ASSERT(!Done()); | 498 ASSERT(!Done()); |
| 480 page_ = page_->next(); | 499 page_ = page_->next(); |
| 481 } | 500 } |
| | 501 |
| 482 private: | 502 private: |
| 483 const PageSpace* space_; | 503 const PageSpace* space_; |
| 484 MutexLocker ml_; | 504 MutexLocker ml_; |
| 485 NoSafepointScope no_safepoint; | 505 NoSafepointScope no_safepoint; |
| 486 HeapPage* page_; | 506 HeapPage* page_; |
| 487 }; | 507 }; |
| 488 | 508 |
| 489 | 509 |
| 490 // Provides exclusive access to large pages, and ensures they are walkable. | 510 // Provides exclusive access to large pages, and ensures they are walkable. |
| 491 class ExclusiveLargePageIterator : ValueObject { | 511 class ExclusiveLargePageIterator : ValueObject { |
| 492 public: | 512 public: |
| 493 explicit ExclusiveLargePageIterator(const PageSpace* space) | 513 explicit ExclusiveLargePageIterator(const PageSpace* space) |
| 494 : space_(space), ml_(space->pages_lock_) { | 514 : space_(space), ml_(space->pages_lock_) { |
| 495 space_->MakeIterable(); | 515 space_->MakeIterable(); |
| 496 page_ = space_->large_pages_; | 516 page_ = space_->large_pages_; |
| 497 } | 517 } |
| 498 HeapPage* page() const { return page_; } | 518 HeapPage* page() const { return page_; } |
| 499 bool Done() const { return page_ == NULL; } | 519 bool Done() const { return page_ == NULL; } |
| 500 void Advance() { | 520 void Advance() { |
| 501 ASSERT(!Done()); | 521 ASSERT(!Done()); |
| 502 page_ = page_->next(); | 522 page_ = page_->next(); |
| 503 } | 523 } |
| | 524 |
| 504 private: | 525 private: |
| 505 const PageSpace* space_; | 526 const PageSpace* space_; |
| 506 MutexLocker ml_; | 527 MutexLocker ml_; |
| 507 NoSafepointScope no_safepoint; | 528 NoSafepointScope no_safepoint; |
| 508 HeapPage* page_; | 529 HeapPage* page_; |
| 509 }; | 530 }; |
| 510 | 531 |
| 511 | 532 |
| 512 void PageSpace::MakeIterable() const { | 533 void PageSpace::MakeIterable() const { |
| 513 // Assert not called from concurrent sweeper task. | 534 // Assert not called from concurrent sweeper task. |
| (...skipping 28 matching lines...) |
| 542 | 563 |
| 543 | 564 |
| 544 void PageSpace::UpdateMaxUsed() { | 565 void PageSpace::UpdateMaxUsed() { |
| 545 if (heap_ == NULL) { | 566 if (heap_ == NULL) { |
| 546 // Some unit tests. | 567 // Some unit tests. |
| 547 return; | 568 return; |
| 548 } | 569 } |
| 549 ASSERT(heap_ != NULL); | 570 ASSERT(heap_ != NULL); |
| 550 ASSERT(heap_->isolate() != NULL); | 571 ASSERT(heap_->isolate() != NULL); |
| 551 Isolate* isolate = heap_->isolate(); | 572 Isolate* isolate = heap_->isolate(); |
| 552 isolate->GetHeapOldUsedMaxMetric()->SetValue( | 573 isolate->GetHeapOldUsedMaxMetric()->SetValue(UsedInWords() * kWordSize); |
| 553 UsedInWords() * kWordSize); | |
| 554 } | 574 } |
| 555 | 575 |
| 556 | 576 |
| 557 bool PageSpace::Contains(uword addr) const { | 577 bool PageSpace::Contains(uword addr) const { |
| 558 for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) { | 578 for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) { |
| 559 if (it.page()->Contains(addr)) { | 579 if (it.page()->Contains(addr)) { |
| 560 return true; | 580 return true; |
| 561 } | 581 } |
| 562 } | 582 } |
| 563 return false; | 583 return false; |
| (...skipping 128 matching lines...) |
| 692 space.AddProperty("avgCollectionPeriodMillis", | 712 space.AddProperty("avgCollectionPeriodMillis", |
| 693 avg_time_between_collections); | 713 avg_time_between_collections); |
| 694 } else { | 714 } else { |
| 695 space.AddProperty("avgCollectionPeriodMillis", 0.0); | 715 space.AddProperty("avgCollectionPeriodMillis", 0.0); |
| 696 } | 716 } |
| 697 } | 717 } |
| 698 | 718 |
| 699 | 719 |
| 700 class HeapMapAsJSONVisitor : public ObjectVisitor { | 720 class HeapMapAsJSONVisitor : public ObjectVisitor { |
| 701 public: | 721 public: |
| 702 explicit HeapMapAsJSONVisitor(JSONArray* array) : array_(array) { } | 722 explicit HeapMapAsJSONVisitor(JSONArray* array) : array_(array) {} |
| 703 virtual void VisitObject(RawObject* obj) { | 723 virtual void VisitObject(RawObject* obj) { |
| 704 array_->AddValue(obj->Size() / kObjectAlignment); | 724 array_->AddValue(obj->Size() / kObjectAlignment); |
| 705 array_->AddValue(obj->GetClassId()); | 725 array_->AddValue(obj->GetClassId()); |
| 706 } | 726 } |
| | 727 |
| 707 private: | 728 private: |
| 708 JSONArray* array_; | 729 JSONArray* array_; |
| 709 }; | 730 }; |
| 710 | 731 |
| 711 | 732 |
| 712 void PageSpace::PrintHeapMapToJSONStream( | 733 void PageSpace::PrintHeapMapToJSONStream(Isolate* isolate, |
| 713 Isolate* isolate, JSONStream* stream) const { | 734 JSONStream* stream) const { |
| 714 if (!FLAG_support_service) { | 735 if (!FLAG_support_service) { |
| 715 return; | 736 return; |
| 716 } | 737 } |
| 717 JSONObject heap_map(stream); | 738 JSONObject heap_map(stream); |
| 718 heap_map.AddProperty("type", "HeapMap"); | 739 heap_map.AddProperty("type", "HeapMap"); |
| 719 heap_map.AddProperty("freeClassId", | 740 heap_map.AddProperty("freeClassId", static_cast<intptr_t>(kFreeListElement)); |
| 720 static_cast<intptr_t>(kFreeListElement)); | |
| 721 heap_map.AddProperty("unitSizeBytes", | 741 heap_map.AddProperty("unitSizeBytes", |
| 722 static_cast<intptr_t>(kObjectAlignment)); | 742 static_cast<intptr_t>(kObjectAlignment)); |
| 723 heap_map.AddProperty("pageSizeBytes", kPageSizeInWords * kWordSize); | 743 heap_map.AddProperty("pageSizeBytes", kPageSizeInWords * kWordSize); |
| 724 { | 744 { |
| 725 JSONObject class_list(&heap_map, "classList"); | 745 JSONObject class_list(&heap_map, "classList"); |
| 726 isolate->class_table()->PrintToJSONObject(&class_list); | 746 isolate->class_table()->PrintToJSONObject(&class_list); |
| 727 } | 747 } |
| 728 { | 748 { |
| 729 // "pages" is an array [page0, page1, ..., pageN], each page of the form | 749 // "pages" is an array [page0, page1, ..., pageN], each page of the form |
| 730 // {"object_start": "0x...", "objects": [size, class id, size, ...]} | 750 // {"object_start": "0x...", "objects": [size, class id, size, ...]} |
| 731 // TODO(19445): Use ExclusivePageIterator once HeapMap supports large pages. | 751 // TODO(19445): Use ExclusivePageIterator once HeapMap supports large pages. |
| 732 MutexLocker ml(pages_lock_); | 752 MutexLocker ml(pages_lock_); |
| 733 MakeIterable(); | 753 MakeIterable(); |
| 734 NoSafepointScope no_safepoint; | 754 NoSafepointScope no_safepoint; |
| 735 JSONArray all_pages(&heap_map, "pages"); | 755 JSONArray all_pages(&heap_map, "pages"); |
| 736 for (HeapPage* page = pages_; page != NULL; page = page->next()) { | 756 for (HeapPage* page = pages_; page != NULL; page = page->next()) { |
| 737 JSONObject page_container(&all_pages); | 757 JSONObject page_container(&all_pages); |
| 738 page_container.AddPropertyF("objectStart", | 758 page_container.AddPropertyF("objectStart", "0x%" Px "", |
| 739 "0x%" Px "", page->object_start()); | 759 page->object_start()); |
| 740 JSONArray page_map(&page_container, "objects"); | 760 JSONArray page_map(&page_container, "objects"); |
| 741 HeapMapAsJSONVisitor printer(&page_map); | 761 HeapMapAsJSONVisitor printer(&page_map); |
| 742 page->VisitObjects(&printer); | 762 page->VisitObjects(&printer); |
| 743 } | 763 } |
| 744 for (HeapPage* page = exec_pages_; page != NULL; page = page->next()) { | 764 for (HeapPage* page = exec_pages_; page != NULL; page = page->next()) { |
| 745 JSONObject page_container(&all_pages); | 765 JSONObject page_container(&all_pages); |
| 746 page_container.AddPropertyF("objectStart", | 766 page_container.AddPropertyF("objectStart", "0x%" Px "", |
| 747 "0x%" Px "", page->object_start()); | 767 page->object_start()); |
| 748 JSONArray page_map(&page_container, "objects"); | 768 JSONArray page_map(&page_container, "objects"); |
| 749 HeapMapAsJSONVisitor printer(&page_map); | 769 HeapMapAsJSONVisitor printer(&page_map); |
| 750 page->VisitObjects(&printer); | 770 page->VisitObjects(&printer); |
| 751 } | 771 } |
| 752 } | 772 } |
| 753 } | 773 } |
| 754 #endif // PRODUCT | 774 #endif // PRODUCT |
| 755 | 775 |
| 756 | 776 |
| 757 bool PageSpace::ShouldCollectCode() { | 777 bool PageSpace::ShouldCollectCode() { |
| (...skipping 76 matching lines...) |
| 834 | 854 |
| 835 const int64_t start = OS::GetCurrentTimeMicros(); | 855 const int64_t start = OS::GetCurrentTimeMicros(); |
| 836 | 856 |
| 837 // Make code pages writable. | 857 // Make code pages writable. |
| 838 WriteProtectCode(false); | 858 WriteProtectCode(false); |
| 839 | 859 |
| 840 // Save old value before GCMarker visits the weak persistent handles. | 860 // Save old value before GCMarker visits the weak persistent handles. |
| 841 SpaceUsage usage_before = GetCurrentUsage(); | 861 SpaceUsage usage_before = GetCurrentUsage(); |
| 842 | 862 |
| 843 // Mark all reachable old-gen objects. | 863 // Mark all reachable old-gen objects. |
| 844 bool collect_code = FLAG_collect_code && | 864 bool collect_code = FLAG_collect_code && ShouldCollectCode() && |
| 845 ShouldCollectCode() && | |
| 846 !isolate->HasAttemptedReload(); | 865 !isolate->HasAttemptedReload(); |
| 847 GCMarker marker(heap_); | 866 GCMarker marker(heap_); |
| 848 marker.MarkObjects(isolate, this, invoke_api_callbacks, collect_code); | 867 marker.MarkObjects(isolate, this, invoke_api_callbacks, collect_code); |
| 849 usage_.used_in_words = marker.marked_words(); | 868 usage_.used_in_words = marker.marked_words(); |
| 850 | 869 |
| 851 int64_t mid1 = OS::GetCurrentTimeMicros(); | 870 int64_t mid1 = OS::GetCurrentTimeMicros(); |
| 852 | 871 |
| 853 // Abandon the remainder of the bump allocation block. | 872 // Abandon the remainder of the bump allocation block. |
| 854 AbandonBumpAllocation(); | 873 AbandonBumpAllocation(); |
| 855 // Reset the freelists and setup sweeping. | 874 // Reset the freelists and setup sweeping. |
| (...skipping 48 matching lines...) |
| 904 } | 923 } |
| 905 | 924 |
| 906 mid3 = OS::GetCurrentTimeMicros(); | 925 mid3 = OS::GetCurrentTimeMicros(); |
| 907 | 926 |
| 908 if (!FLAG_concurrent_sweep) { | 927 if (!FLAG_concurrent_sweep) { |
| 909 // Sweep all regular sized pages now. | 928 // Sweep all regular sized pages now. |
| 910 prev_page = NULL; | 929 prev_page = NULL; |
| 911 page = pages_; | 930 page = pages_; |
| 912 while (page != NULL) { | 931 while (page != NULL) { |
| 913 HeapPage* next_page = page->next(); | 932 HeapPage* next_page = page->next(); |
| 914 bool page_in_use = sweeper.SweepPage( | 933 bool page_in_use = |
| 915 page, &freelist_[page->type()], true); | 934 sweeper.SweepPage(page, &freelist_[page->type()], true); |
| 916 if (page_in_use) { | 935 if (page_in_use) { |
| 917 prev_page = page; | 936 prev_page = page; |
| 918 } else { | 937 } else { |
| 919 FreePage(page, prev_page); | 938 FreePage(page, prev_page); |
| 920 } | 939 } |
| 921 // Advance to the next page. | 940 // Advance to the next page. |
| 922 page = next_page; | 941 page = next_page; |
| 923 } | 942 } |
| 924 if (FLAG_verify_after_gc) { | 943 if (FLAG_verify_after_gc) { |
| 925 OS::PrintErr("Verifying after sweeping..."); | 944 OS::PrintErr("Verifying after sweeping..."); |
| 926 heap_->VerifyGC(kForbidMarked); | 945 heap_->VerifyGC(kForbidMarked); |
| 927 OS::PrintErr(" done.\n"); | 946 OS::PrintErr(" done.\n"); |
| 928 } | 947 } |
| 929 } else { | 948 } else { |
| 930 // Start the concurrent sweeper task now. | 949 // Start the concurrent sweeper task now. |
| 931 GCSweeper::SweepConcurrent( | 950 GCSweeper::SweepConcurrent(isolate, pages_, pages_tail_, |
| 932 isolate, pages_, pages_tail_, &freelist_[HeapPage::kData]); | 951 &freelist_[HeapPage::kData]); |
| 933 } | 952 } |
| 934 } | 953 } |
| 935 | 954 |
| 936 // Make code pages read-only. | 955 // Make code pages read-only. |
| 937 WriteProtectCode(true); | 956 WriteProtectCode(true); |
| 938 | 957 |
| 939 int64_t end = OS::GetCurrentTimeMicros(); | 958 int64_t end = OS::GetCurrentTimeMicros(); |
| 940 | 959 |
| 941 // Record signals for growth control. Include size of external allocations. | 960 // Record signals for growth control. Include size of external allocations. |
| 942 page_space_controller_.EvaluateGarbageCollection(usage_before, | 961 page_space_controller_.EvaluateGarbageCollection( |
| 943 GetCurrentUsage(), | 962 usage_before, GetCurrentUsage(), start, end); |
| 944 start, end); | |
| 945 | 963 |
| 946 heap_->RecordTime(kMarkObjects, mid1 - start); | 964 heap_->RecordTime(kMarkObjects, mid1 - start); |
| 947 heap_->RecordTime(kResetFreeLists, mid2 - mid1); | 965 heap_->RecordTime(kResetFreeLists, mid2 - mid1); |
| 948 heap_->RecordTime(kSweepPages, mid3 - mid2); | 966 heap_->RecordTime(kSweepPages, mid3 - mid2); |
| 949 heap_->RecordTime(kSweepLargePages, end - mid3); | 967 heap_->RecordTime(kSweepLargePages, end - mid3); |
| 950 | 968 |
| 951 if (FLAG_print_free_list_after_gc) { | 969 if (FLAG_print_free_list_after_gc) { |
| 952 OS::Print("Data Freelist (after GC):\n"); | 970 OS::Print("Data Freelist (after GC):\n"); |
| 953 freelist_[HeapPage::kData].Print(); | 971 freelist_[HeapPage::kData].Print(); |
| 954 OS::Print("Executable Freelist (after GC):\n"); | 972 OS::Print("Executable Freelist (after GC):\n"); |
| (...skipping 17 matching lines...) |
| 972 | 990 |
| 973 uword PageSpace::TryAllocateDataBumpInternal(intptr_t size, | 991 uword PageSpace::TryAllocateDataBumpInternal(intptr_t size, |
| 974 GrowthPolicy growth_policy, | 992 GrowthPolicy growth_policy, |
| 975 bool is_locked) { | 993 bool is_locked) { |
| 976 ASSERT(size >= kObjectAlignment); | 994 ASSERT(size >= kObjectAlignment); |
| 977 ASSERT(Utils::IsAligned(size, kObjectAlignment)); | 995 ASSERT(Utils::IsAligned(size, kObjectAlignment)); |
| 978 intptr_t remaining = bump_end_ - bump_top_; | 996 intptr_t remaining = bump_end_ - bump_top_; |
| 979 if (remaining < size) { | 997 if (remaining < size) { |
| 980 // Checking this first would be logical, but needlessly slow. | 998 // Checking this first would be logical, but needlessly slow. |
| 981 if (size >= kAllocatablePageSize) { | 999 if (size >= kAllocatablePageSize) { |
| 982 return is_locked ? | 1000 return is_locked ? TryAllocateDataLocked(size, growth_policy) |
| 983 TryAllocateDataLocked(size, growth_policy) : | 1001 : TryAllocate(size, HeapPage::kData, growth_policy); |
| 984 TryAllocate(size, HeapPage::kData, growth_policy); | |
| 985 } | 1002 } |
| 986 FreeListElement* block = is_locked ? | 1003 FreeListElement* block = |
| 987 freelist_[HeapPage::kData].TryAllocateLargeLocked(size) : | 1004 is_locked ? freelist_[HeapPage::kData].TryAllocateLargeLocked(size) |
| 988 freelist_[HeapPage::kData].TryAllocateLarge(size); | 1005 : freelist_[HeapPage::kData].TryAllocateLarge(size); |
| 989 if (block == NULL) { | 1006 if (block == NULL) { |
| 990 // Allocating from a new page (if growth policy allows) will have the | 1007 // Allocating from a new page (if growth policy allows) will have the |
| 991 // side-effect of populating the freelist with a large block. The next | 1008 // side-effect of populating the freelist with a large block. The next |
| 992 // bump allocation request will have a chance to consume that block. | 1009 // bump allocation request will have a chance to consume that block. |
| 993 // TODO(koda): Could take freelist lock just once instead of twice. | 1010 // TODO(koda): Could take freelist lock just once instead of twice. |
| 994 return TryAllocateInFreshPage(size, | 1011 return TryAllocateInFreshPage(size, HeapPage::kData, growth_policy, |
| 995 HeapPage::kData, | |
| 996 growth_policy, | |
| 997 is_locked); | 1012 is_locked); |
| 998 } | 1013 } |
| 999 intptr_t block_size = block->Size(); | 1014 intptr_t block_size = block->Size(); |
| 1000 if (remaining > 0) { | 1015 if (remaining > 0) { |
| 1001 if (is_locked) { | 1016 if (is_locked) { |
| 1002 freelist_[HeapPage::kData].FreeLocked(bump_top_, remaining); | 1017 freelist_[HeapPage::kData].FreeLocked(bump_top_, remaining); |
| 1003 } else { | 1018 } else { |
| 1004 freelist_[HeapPage::kData].Free(bump_top_, remaining); | 1019 freelist_[HeapPage::kData].Free(bump_top_, remaining); |
| 1005 } | 1020 } |
| 1006 } | 1021 } |
| 1007 bump_top_ = reinterpret_cast<uword>(block); | 1022 bump_top_ = reinterpret_cast<uword>(block); |
| 1008 bump_end_ = bump_top_ + block_size; | 1023 bump_end_ = bump_top_ + block_size; |
| 1009 remaining = block_size; | 1024 remaining = block_size; |
| 1010 } | 1025 } |
| 1011 ASSERT(remaining >= size); | 1026 ASSERT(remaining >= size); |
| 1012 uword result = bump_top_; | 1027 uword result = bump_top_; |
| 1013 bump_top_ += size; | 1028 bump_top_ += size; |
| 1014 AtomicOperations::IncrementBy(&(usage_.used_in_words), | 1029 AtomicOperations::IncrementBy(&(usage_.used_in_words), |
| 1015 (size >> kWordSizeLog2)); | 1030 (size >> kWordSizeLog2)); |
| 1016 // Note: Remaining block is unwalkable until MakeIterable is called. | 1031 // Note: Remaining block is unwalkable until MakeIterable is called. |
| 1017 #ifdef DEBUG | 1032 #ifdef DEBUG |
| 1018 if (bump_top_ < bump_end_) { | 1033 if (bump_top_ < bump_end_) { |
| 1019 // Fail fast if we try to walk the remaining block. | 1034 // Fail fast if we try to walk the remaining block. |
| 1020 COMPILE_ASSERT(kIllegalCid == 0); | 1035 COMPILE_ASSERT(kIllegalCid == 0); |
| 1021 *reinterpret_cast<uword*>(bump_top_) = 0; | 1036 *reinterpret_cast<uword*>(bump_top_) = 0; |
| 1022 } | 1037 } |
| 1023 #endif // DEBUG | 1038 #endif // DEBUG |
| 1024 return result; | 1039 return result; |
| 1025 } | 1040 } |
| 1026 | 1041 |
| (...skipping 37 matching lines...) |
| 1064 size += offset; | 1079 size += offset; |
| 1065 | 1080 |
| 1066 VirtualMemory* memory = VirtualMemory::ForExternalPage(pointer, size); | 1081 VirtualMemory* memory = VirtualMemory::ForExternalPage(pointer, size); |
| 1067 ASSERT(memory != NULL); | 1082 ASSERT(memory != NULL); |
| 1068 HeapPage* page = reinterpret_cast<HeapPage*>(malloc(sizeof(HeapPage))); | 1083 HeapPage* page = reinterpret_cast<HeapPage*>(malloc(sizeof(HeapPage))); |
| 1069 page->memory_ = memory; | 1084 page->memory_ = memory; |
| 1070 page->next_ = NULL; | 1085 page->next_ = NULL; |
| 1071 page->object_end_ = memory->end(); | 1086 page->object_end_ = memory->end(); |
| 1072 | 1087 |
| 1073 MutexLocker ml(pages_lock_); | 1088 MutexLocker ml(pages_lock_); |
| 1074 HeapPage** first, **tail; | 1089 HeapPage **first, **tail; |
| 1075 if (is_executable) { | 1090 if (is_executable) { |
| 1076 ASSERT(Utils::IsAligned(pointer, OS::PreferredCodeAlignment())); | 1091 ASSERT(Utils::IsAligned(pointer, OS::PreferredCodeAlignment())); |
| 1077 page->type_ = HeapPage::kExecutable; | 1092 page->type_ = HeapPage::kExecutable; |
| 1078 first = &exec_pages_; | 1093 first = &exec_pages_; |
| 1079 tail = &exec_pages_tail_; | 1094 tail = &exec_pages_tail_; |
| 1080 } else { | 1095 } else { |
| 1081 page->type_ = HeapPage::kReadOnlyData; | 1096 page->type_ = HeapPage::kReadOnlyData; |
| 1082 first = &pages_; | 1097 first = &pages_; |
| 1083 tail = &pages_tail_; | 1098 tail = &pages_tail_; |
| 1084 } | 1099 } |
| (...skipping 10 matching lines...) |
| 1095 int heap_growth_ratio, | 1110 int heap_growth_ratio, |
| 1096 int heap_growth_max, | 1111 int heap_growth_max, |
| 1097 int garbage_collection_time_ratio) | 1112 int garbage_collection_time_ratio) |
| 1098 : heap_(heap), | 1113 : heap_(heap), |
| 1099 is_enabled_(false), | 1114 is_enabled_(false), |
| 1100 grow_heap_(heap_growth_max / 2), | 1115 grow_heap_(heap_growth_max / 2), |
| 1101 heap_growth_ratio_(heap_growth_ratio), | 1116 heap_growth_ratio_(heap_growth_ratio), |
| 1102 desired_utilization_((100.0 - heap_growth_ratio) / 100.0), | 1117 desired_utilization_((100.0 - heap_growth_ratio) / 100.0), |
| 1103 heap_growth_max_(heap_growth_max), | 1118 heap_growth_max_(heap_growth_max), |
| 1104 garbage_collection_time_ratio_(garbage_collection_time_ratio), | 1119 garbage_collection_time_ratio_(garbage_collection_time_ratio), |
| 1105 last_code_collection_in_us_(OS::GetCurrentTimeMicros()) { | 1120 last_code_collection_in_us_(OS::GetCurrentTimeMicros()) {} |
| 1106 } | |
| 1107 | 1121 |
| 1108 | 1122 |
| 1109 PageSpaceController::~PageSpaceController() {} | 1123 PageSpaceController::~PageSpaceController() {} |
| 1110 | 1124 |
| 1111 | 1125 |
| 1112 bool PageSpaceController::NeedsGarbageCollection(SpaceUsage after) const { | 1126 bool PageSpaceController::NeedsGarbageCollection(SpaceUsage after) const { |
| 1113 if (!is_enabled_) { | 1127 if (!is_enabled_) { |
| 1114 return false; | 1128 return false; |
| 1115 } | 1129 } |
| 1116 if (heap_growth_ratio_ == 100) { | 1130 if (heap_growth_ratio_ == 100) { |
| (...skipping 15 matching lines...) |
| 1132 if (history_.IsEmpty()) { | 1146 if (history_.IsEmpty()) { |
| 1133 double seconds_since_init = MicrosecondsToSeconds( | 1147 double seconds_since_init = MicrosecondsToSeconds( |
| 1134 OS::GetCurrentTimeMicros() - heap_->isolate()->start_time()); | 1148 OS::GetCurrentTimeMicros() - heap_->isolate()->start_time()); |
| 1135 if (seconds_since_init > kInitialTimeoutSeconds) { | 1149 if (seconds_since_init > kInitialTimeoutSeconds) { |
| 1136 multiplier *= seconds_since_init / kInitialTimeoutSeconds; | 1150 multiplier *= seconds_since_init / kInitialTimeoutSeconds; |
| 1137 } | 1151 } |
| 1138 } | 1152 } |
| 1139 bool needs_gc = capacity_increase_in_pages * multiplier > grow_heap_; | 1153 bool needs_gc = capacity_increase_in_pages * multiplier > grow_heap_; |
| 1140 if (FLAG_log_growth) { | 1154 if (FLAG_log_growth) { |
| 1141 OS::PrintErr("%s: %" Pd " * %f %s %" Pd "\n", | 1155 OS::PrintErr("%s: %" Pd " * %f %s %" Pd "\n", |
| 1142 needs_gc ? "NEEDS GC" : "grow", | 1156 needs_gc ? "NEEDS GC" : "grow", capacity_increase_in_pages, |
| 1143 capacity_increase_in_pages, | 1157 multiplier, needs_gc ? ">" : "<=", grow_heap_); |
| 1144 multiplier, | |
| 1145 needs_gc ? ">" : "<=", | |
| 1146 grow_heap_); | |
| 1147 } | 1158 } |
| 1148 return needs_gc; | 1159 return needs_gc; |
| 1149 } | 1160 } |
| 1150 | 1161 |
| 1151 | 1162 |
| 1152 void PageSpaceController::EvaluateGarbageCollection( | 1163 void PageSpaceController::EvaluateGarbageCollection(SpaceUsage before, |
| 1153 SpaceUsage before, SpaceUsage after, int64_t start, int64_t end) { | 1164 SpaceUsage after, |
| | 1165 int64_t start, |
| | 1166 int64_t end) { |
| 1154 ASSERT(end >= start); | 1167 ASSERT(end >= start); |
| 1155 history_.AddGarbageCollectionTime(start, end); | 1168 history_.AddGarbageCollectionTime(start, end); |
| 1156 const int gc_time_fraction = history_.GarbageCollectionTimeFraction(); | 1169 const int gc_time_fraction = history_.GarbageCollectionTimeFraction(); |
| 1157 heap_->RecordData(PageSpace::kGCTimeFraction, gc_time_fraction); | 1170 heap_->RecordData(PageSpace::kGCTimeFraction, gc_time_fraction); |
| 1158 | 1171 |
| 1159 // Assume garbage increases linearly with allocation: | 1172 // Assume garbage increases linearly with allocation: |
| 1160 // G = kA, and estimate k from the previous cycle. | 1173 // G = kA, and estimate k from the previous cycle. |
| 1161 const intptr_t allocated_since_previous_gc = | 1174 const intptr_t allocated_since_previous_gc = |
| 1162 before.used_in_words - last_usage_.used_in_words; | 1175 before.used_in_words - last_usage_.used_in_words; |
| 1163 if (allocated_since_previous_gc > 0) { | 1176 if (allocated_since_previous_gc > 0) { |
| 1164 const intptr_t garbage = before.used_in_words - after.used_in_words; | 1177 const intptr_t garbage = before.used_in_words - after.used_in_words; |
| 1165 ASSERT(garbage >= 0); | 1178 ASSERT(garbage >= 0); |
| 1166 const double k = garbage / static_cast<double>(allocated_since_previous_gc); | 1179 const double k = garbage / static_cast<double>(allocated_since_previous_gc); |
| 1167 const int garbage_ratio = static_cast<int>(k * 100); | 1180 const int garbage_ratio = static_cast<int>(k * 100); |
| 1168 heap_->RecordData(PageSpace::kGarbageRatio, garbage_ratio); | 1181 heap_->RecordData(PageSpace::kGarbageRatio, garbage_ratio); |
| 1169 | 1182 |
| 1170 // Define GC to be 'worthwhile' iff at least fraction t of heap is garbage. | 1183 // Define GC to be 'worthwhile' iff at least fraction t of heap is garbage. |
| 1171 double t = 1.0 - desired_utilization_; | 1184 double t = 1.0 - desired_utilization_; |
| 1172 // If we spend too much time in GC, strive for even more free space. | 1185 // If we spend too much time in GC, strive for even more free space. |
| 1173 if (gc_time_fraction > garbage_collection_time_ratio_) { | 1186 if (gc_time_fraction > garbage_collection_time_ratio_) { |
| 1174 t += (gc_time_fraction - garbage_collection_time_ratio_) / 100.0; | 1187 t += (gc_time_fraction - garbage_collection_time_ratio_) / 100.0; |
| 1175 } | 1188 } |
| 1176 | 1189 |
| 1177 const intptr_t grow_ratio = ( | 1190 const intptr_t grow_ratio = |
| 1178 static_cast<intptr_t>(after.capacity_in_words / desired_utilization_) - | 1191 (static_cast<intptr_t>(after.capacity_in_words / desired_utilization_) - |
| 1179 after.capacity_in_words) / PageSpace::kPageSizeInWords; | 1192 after.capacity_in_words) / |
| | 1193 PageSpace::kPageSizeInWords; |
| 1180 if (garbage_ratio == 0) { | 1194 if (garbage_ratio == 0) { |
| 1181 // No garbage in the previous cycle so it would be hard to compute a | 1195 // No garbage in the previous cycle so it would be hard to compute a |
| 1182 // grow_heap_ size based on estimated garbage so we use growth ratio | 1196 // grow_heap_ size based on estimated garbage so we use growth ratio |
| 1183 // heuristics instead. | 1197 // heuristics instead. |
| 1184 grow_heap_ = Utils::Maximum(static_cast<intptr_t>(heap_growth_max_), | 1198 grow_heap_ = |
| 1185 grow_ratio); | 1199 Utils::Maximum(static_cast<intptr_t>(heap_growth_max_), grow_ratio); |
| 1186 } else { | 1200 } else { |
| 1187 // Find minimum 'grow_heap_' such that after increasing capacity by | 1201 // Find minimum 'grow_heap_' such that after increasing capacity by |
| 1188 // 'grow_heap_' pages and filling them, we expect a GC to be worthwhile. | 1202 // 'grow_heap_' pages and filling them, we expect a GC to be worthwhile. |
| 1189 intptr_t max = heap_growth_max_; | 1203 intptr_t max = heap_growth_max_; |
| 1190 intptr_t min = 0; | 1204 intptr_t min = 0; |
| 1191 intptr_t local_grow_heap = 0; | 1205 intptr_t local_grow_heap = 0; |
| 1192 while (min < max) { | 1206 while (min < max) { |
| 1193 local_grow_heap = (max + min) / 2; | 1207 local_grow_heap = (max + min) / 2; |
| 1194 const intptr_t limit = after.capacity_in_words + | 1208 const intptr_t limit = after.capacity_in_words + |
| 1195 (grow_heap_ * PageSpace::kPageSizeInWords); | 1209 (grow_heap_ * PageSpace::kPageSizeInWords); |
| 1196 const intptr_t allocated_before_next_gc = limit - after.used_in_words; | 1210 const intptr_t allocated_before_next_gc = limit - after.used_in_words; |
| 1197 const double estimated_garbage = k * allocated_before_next_gc; | 1211 const double estimated_garbage = k * allocated_before_next_gc; |
| 1198 if (t <= estimated_garbage / limit) { | 1212 if (t <= estimated_garbage / limit) { |
| 1199 max = local_grow_heap - 1; | 1213 max = local_grow_heap - 1; |
| 1200 } else { | 1214 } else { |
| 1201 min = local_grow_heap + 1; | 1215 min = local_grow_heap + 1; |
| 1202 } | 1216 } |
| 1203 } | 1217 } |
| 1204 grow_heap_ = local_grow_heap; | 1218 grow_heap_ = local_grow_heap; |
| 1205 ASSERT(grow_heap_ >= 0); | 1219 ASSERT(grow_heap_ >= 0); |
| (...skipping 12 matching lines...) |
| 1218 // Limit shrinkage: allow growth by at least half the pages freed by GC. | 1232 // Limit shrinkage: allow growth by at least half the pages freed by GC. |
| 1219 const intptr_t freed_pages = | 1233 const intptr_t freed_pages = |
| 1220 (before.capacity_in_words - after.capacity_in_words) / | 1234 (before.capacity_in_words - after.capacity_in_words) / |
| 1221 PageSpace::kPageSizeInWords; | 1235 PageSpace::kPageSizeInWords; |
| 1222 grow_heap_ = Utils::Maximum(grow_heap_, freed_pages / 2); | 1236 grow_heap_ = Utils::Maximum(grow_heap_, freed_pages / 2); |
| 1223 heap_->RecordData(PageSpace::kAllowedGrowth, grow_heap_); | 1237 heap_->RecordData(PageSpace::kAllowedGrowth, grow_heap_); |
| 1224 last_usage_ = after; | 1238 last_usage_ = after; |
| 1225 } | 1239 } |
| 1226 | 1240 |
| 1227 | 1241 |
| 1228 void PageSpaceGarbageCollectionHistory:: | 1242 void PageSpaceGarbageCollectionHistory::AddGarbageCollectionTime(int64_t start, |
| 1229 AddGarbageCollectionTime(int64_t start, int64_t end) { | 1243 int64_t end) { |
| 1230 Entry entry; | 1244 Entry entry; |
| 1231 entry.start = start; | 1245 entry.start = start; |
| 1232 entry.end = end; | 1246 entry.end = end; |
| 1233 history_.Add(entry); | 1247 history_.Add(entry); |
| 1234 } | 1248 } |
| 1235 | 1249 |
| 1236 | 1250 |
| 1237 int PageSpaceGarbageCollectionHistory::GarbageCollectionTimeFraction() { | 1251 int PageSpaceGarbageCollectionHistory::GarbageCollectionTimeFraction() { |
| 1238 int64_t gc_time = 0; | 1252 int64_t gc_time = 0; |
| 1239 int64_t total_time = 0; | 1253 int64_t total_time = 0; |
| 1240 for (int i = 0; i < history_.Size() - 1; i++) { | 1254 for (int i = 0; i < history_.Size() - 1; i++) { |
| 1241 Entry current = history_.Get(i); | 1255 Entry current = history_.Get(i); |
| 1242 Entry previous = history_.Get(i + 1); | 1256 Entry previous = history_.Get(i + 1); |
| 1243 gc_time += current.end - current.start; | 1257 gc_time += current.end - current.start; |
| 1244 total_time += current.end - previous.end; | 1258 total_time += current.end - previous.end; |
| 1245 } | 1259 } |
| 1246 if (total_time == 0) { | 1260 if (total_time == 0) { |
| 1247 return 0; | 1261 return 0; |
| 1248 } else { | 1262 } else { |
| 1249 ASSERT(total_time >= gc_time); | 1263 ASSERT(total_time >= gc_time); |
| 1250 int result = static_cast<int>((static_cast<double>(gc_time) / | 1264 int result = static_cast<int>( |
| 1251 static_cast<double>(total_time)) * 100); | 1265 (static_cast<double>(gc_time) / static_cast<double>(total_time)) * 100); |
| 1252 return result; | 1266 return result; |
| 1253 } | 1267 } |
| 1254 } | 1268 } |
| 1255 | 1269 |
| 1256 } // namespace dart | 1270 } // namespace dart |
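
For readers skimming the diff: `TryAllocateDataBumpInternal` above is a bump-pointer allocator over the current free block. Below is a minimal standalone sketch of just its fast path; the names (`BumpBlock`, `BumpAllocate`) are invented for illustration, and the real code additionally takes the freelist lock, recycles the unused remainder into the freelist, and refills from a large freelist element or a fresh page.

```cpp
#include <stdint.h>

// Invented names; the real allocator also asserts object alignment and, in
// DEBUG builds, poisons the remainder so it fails fast if walked.
struct BumpBlock {
  uintptr_t top;  // next free address (bump_top_ above)
  uintptr_t end;  // first address past the block (bump_end_ above)
};

// Returns 0 when the current block is too small; the caller then refills
// the block (freelist large block or fresh page) and retries.
uintptr_t BumpAllocate(BumpBlock* b, uintptr_t size) {
  if (b->end - b->top < size) {
    return 0;  // slow path in the code above
  }
  uintptr_t result = b->top;
  b->top += size;  // remainder stays unwalkable until MakeIterable runs
  return result;
}
```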
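The sizing decision in `PageSpaceController::EvaluateGarbageCollection` can be read as: estimate k, the garbage produced per word allocated, from the last cycle, then pick the smallest growth in pages such that, once the new capacity is filled, at least fraction t of the heap is expected to be garbage, making the next collection worthwhile. The sketch below models that bisection under those assumptions; the function name and parameter packaging are invented, and only `kPageSizeInWords`-style inputs mirror the code above.

```cpp
#include <stdint.h>

struct SpaceUsageModel {
  intptr_t capacity_in_words;
  intptr_t used_in_words;
};

// Smallest number of pages to grow such that, after filling the new
// capacity, an estimated fraction >= t of the heap is garbage.
intptr_t MinWorthwhileGrowthInPages(double k,  // est. garbage per allocated word
                                    double t,  // required garbage fraction
                                    intptr_t heap_growth_max,
                                    intptr_t page_size_in_words,
                                    SpaceUsageModel after) {
  intptr_t min = 0;
  intptr_t max = heap_growth_max;
  intptr_t grow = 0;
  while (min < max) {
    grow = (max + min) / 2;
    // Note: the VM code above computes 'limit' from the member grow_heap_
    // rather than the bisection candidate; this sketch uses the candidate,
    // which matches the apparent intent of the search.
    const intptr_t limit = after.capacity_in_words + grow * page_size_in_words;
    const intptr_t allocated_before_next_gc = limit - after.used_in_words;
    const double estimated_garbage = k * allocated_before_next_gc;
    if (t <= estimated_garbage / limit) {
      max = grow - 1;  // already worthwhile; try a smaller growth
    } else {
      min = grow + 1;  // too little expected garbage; grow more
    }
  }
  return grow;
}
```

The t used above is `1.0 - desired_utilization_`, nudged upward when the recent GC time fraction exceeds `garbage_collection_time_ratio_`, so a VM spending too long in GC strives for more free space.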