OLD | NEW |
1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/pages.h" | 5 #include "vm/pages.h" |
6 | 6 |
7 #include "platform/assert.h" | 7 #include "platform/assert.h" |
8 #include "vm/compiler_stats.h" | 8 #include "vm/compiler_stats.h" |
9 #include "vm/gc_marker.h" | 9 #include "vm/gc_marker.h" |
10 #include "vm/gc_sweeper.h" | 10 #include "vm/gc_sweeper.h" |
(...skipping 385 matching lines...)
396 // TODO(koda): Control growth. | 396 // TODO(koda): Control growth. |
397 } | 397 } |
398 | 398 |
399 | 399 |
400 void PageSpace::FreeExternal(intptr_t size) { | 400 void PageSpace::FreeExternal(intptr_t size) { |
401 intptr_t size_in_words = size >> kWordSizeLog2; | 401 intptr_t size_in_words = size >> kWordSizeLog2; |
402 usage_.external_in_words -= size_in_words; | 402 usage_.external_in_words -= size_in_words; |
403 } | 403 } |
404 | 404 |
405 | 405 |
| 406 // Provides exclusive access to the pages, and ensures they are walkable. |
| 407 class ExclusivePageIterator : ValueObject { |
| 408 public: |
| 409 explicit ExclusivePageIterator(const PageSpace* space) |
| 410 : space_(space), ml_(space->pages_lock_) { |
| 411 space_->MakeIterable(); |
| 412 page_ = space_->pages_; |
| 413 if (page_ == NULL) { |
| 414 page_ = space_->exec_pages_; |
| 415 if (page_ == NULL) { |
| 416 page_ = space_->large_pages_; |
| 417 } |
| 418 } |
| 419 } |
| 420 HeapPage* page() const { return page_; } |
| 421 bool done() const { return page_ == NULL; } |
| 422 void Advance() { |
| 423 ASSERT(!done()); |
| 424 page_ = space_->NextPageAnySize(page_); |
| 425 } |
| 426 private: |
| 427 const PageSpace* space_; |
| 428 MutexLocker ml_; |
| 429 NoGCScope no_gc; |
| 430 HeapPage* page_; |
| 431 }; |
| 432 |
| 433 |
| 434 void PageSpace::MakeIterable() const { |
| 435 // TODO(koda): Assert not called from concurrent sweeper task. |
| 436 if (bump_top_ < bump_end_) { |
| 437 FreeListElement::AsElement(bump_top_, bump_end_ - bump_top_); |
| 438 } |
| 439 } |
| 440 |
| 441 |
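Note on the new MakeIterable()/ExclusivePageIterator pattern above: heap walkers advance object by object using each object's size header, so the unused [bump_top_, bump_end_) tail of a bump block is unwalkable until it is stamped with a free-list element recording its size. A minimal standalone sketch of that idea, using simplified stand-in types rather than the VM's real HeapPage/FreeListElement layout:

#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical object header: class id plus total size in bytes.
struct Header { uint32_t cid; uint32_t size; };

constexpr uint32_t kFreeCid = 1;  // stand-in for the VM's free-list class id

// Walks [start, end) assuming every object begins with a Header.
void WalkRegion(uint8_t* start, uint8_t* end) {
  for (uint8_t* p = start; p < end; ) {
    Header* h = reinterpret_cast<Header*>(p);
    assert(h->size > 0);  // an unstamped gap reads as size 0: fail fast
    p += h->size;
  }
}

// Analogue of FreeListElement::AsElement: stamp the gap so walkers skip it.
void MakeIterable(uint8_t* bump_top, uint8_t* bump_end) {
  if (bump_top < bump_end) {
    Header* h = reinterpret_cast<Header*>(bump_top);
    h->cid = kFreeCid;
    h->size = static_cast<uint32_t>(bump_end - bump_top);
  }
}

int main() {
  std::vector<uint8_t> page(64, 0);
  // One 16-byte "object" at the start; the rest is an unused bump tail.
  Header* obj = reinterpret_cast<Header*>(page.data());
  obj->cid = 42;
  obj->size = 16;
  MakeIterable(page.data() + 16, page.data() + 64);
  WalkRegion(page.data(), page.data() + 64);  // now safe to walk
  return 0;
}

This is why the iterator's constructor calls MakeIterable() before taking the first page: once stamped, the tail reads as one dead pseudo-object, and every walk in the hunks below can safely step over it.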
406 bool PageSpace::Contains(uword addr) const { | 442 bool PageSpace::Contains(uword addr) const { |
407 MutexLocker ml(pages_lock_); | 443 for (ExclusivePageIterator it(this); !it.done(); it.Advance()) { |
408 NoGCScope no_gc; | 444 if (it.page()->Contains(addr)) { |
409 HeapPage* page = pages_; | |
410 while (page != NULL) { | |
411 if (page->Contains(addr)) { | |
412 return true; | 445 return true; |
413 } | 446 } |
414 page = NextPageAnySize(page); | |
415 } | 447 } |
416 return false; | 448 return false; |
417 } | 449 } |
418 | 450 |
419 | 451 |
420 bool PageSpace::Contains(uword addr, HeapPage::PageType type) const { | 452 bool PageSpace::Contains(uword addr, HeapPage::PageType type) const { |
421 MutexLocker ml(pages_lock_); | 453 for (ExclusivePageIterator it(this); !it.done(); it.Advance()) { |
422 NoGCScope no_gc; | 454 if ((it.page()->type() == type) && it.page()->Contains(addr)) { |
423 HeapPage* page = pages_; | |
424 while (page != NULL) { | |
425 if ((page->type() == type) && page->Contains(addr)) { | |
426 return true; | 455 return true; |
427 } | 456 } |
428 page = NextPageAnySize(page); | |
429 } | 457 } |
430 return false; | 458 return false; |
431 } | 459 } |
432 | 460 |
433 | 461 |
434 void PageSpace::StartEndAddress(uword* start, uword* end) const { | 462 void PageSpace::StartEndAddress(uword* start, uword* end) const { |
435 MutexLocker ml(pages_lock_); | |
436 NoGCScope no_gc; | |
437 ASSERT((pages_ != NULL) || (exec_pages_ != NULL) || (large_pages_ != NULL)); | 463 ASSERT((pages_ != NULL) || (exec_pages_ != NULL) || (large_pages_ != NULL)); |
438 *start = static_cast<uword>(~0); | 464 *start = static_cast<uword>(~0); |
439 *end = 0; | 465 *end = 0; |
440 for (HeapPage* page = pages_; page != NULL; page = NextPageAnySize(page)) { | 466 for (ExclusivePageIterator it(this); !it.done(); it.Advance()) { |
441 *start = Utils::Minimum(*start, page->object_start()); | 467 *start = Utils::Minimum(*start, it.page()->object_start()); |
442 *end = Utils::Maximum(*end, page->object_end()); | 468 *end = Utils::Maximum(*end, it.page()->object_end()); |
443 } | 469 } |
444 ASSERT(*start != static_cast<uword>(~0)); | 470 ASSERT(*start != static_cast<uword>(~0)); |
445 ASSERT(*end != 0); | 471 ASSERT(*end != 0); |
446 } | 472 } |
447 | 473 |
448 | 474 |
449 void PageSpace::VisitObjects(ObjectVisitor* visitor) const { | 475 void PageSpace::VisitObjects(ObjectVisitor* visitor) const { |
450 MutexLocker ml(pages_lock_); | 476 for (ExclusivePageIterator it(this); !it.done(); it.Advance()) { |
451 NoGCScope no_gc; | 477 it.page()->VisitObjects(visitor); |
452 HeapPage* page = pages_; | |
453 while (page != NULL) { | |
454 page->VisitObjects(visitor); | |
455 page = NextPageAnySize(page); | |
456 } | 478 } |
457 } | 479 } |
458 | 480 |
459 | 481 |
460 void PageSpace::VisitObjectPointers(ObjectPointerVisitor* visitor) const { | 482 void PageSpace::VisitObjectPointers(ObjectPointerVisitor* visitor) const { |
461 MutexLocker ml(pages_lock_); | 483 for (ExclusivePageIterator it(this); !it.done(); it.Advance()) { |
462 NoGCScope no_gc; | 484 it.page()->VisitObjectPointers(visitor); |
463 HeapPage* page = pages_; | |
464 while (page != NULL) { | |
465 page->VisitObjectPointers(visitor); | |
466 page = NextPageAnySize(page); | |
467 } | 485 } |
468 } | 486 } |
469 | 487 |
470 | 488 |
471 RawObject* PageSpace::FindObject(FindObjectVisitor* visitor, | 489 RawObject* PageSpace::FindObject(FindObjectVisitor* visitor, |
472 HeapPage::PageType type) const { | 490 HeapPage::PageType type) const { |
473 ASSERT(Isolate::Current()->no_gc_scope_depth() != 0); | 491 for (ExclusivePageIterator it(this); !it.done(); it.Advance()) { |
474 MutexLocker ml(pages_lock_); | 492 if (it.page()->type() == type) { |
475 NoGCScope no_gc; | 493 RawObject* obj = it.page()->FindObject(visitor); |
476 HeapPage* page = pages_; | |
477 while (page != NULL) { | |
478 if (page->type() == type) { | |
479 RawObject* obj = page->FindObject(visitor); | |
480 if (obj != Object::null()) { | 494 if (obj != Object::null()) { |
481 return obj; | 495 return obj; |
482 } | 496 } |
483 } | 497 } |
484 page = NextPageAnySize(page); | |
485 } | 498 } |
486 return Object::null(); | 499 return Object::null(); |
487 } | 500 } |
488 | 501 |
489 | 502 |
490 void PageSpace::WriteProtect(bool read_only) { | 503 void PageSpace::WriteProtect(bool read_only) { |
491 MutexLocker ml(pages_lock_); | 504 for (ExclusivePageIterator it(this); !it.done(); it.Advance()) { |
492 NoGCScope no_gc; | 505 it.page()->WriteProtect(read_only); |
493 HeapPage* page = pages_; | |
494 while (page != NULL) { | |
495 page->WriteProtect(read_only); | |
496 page = NextPageAnySize(page); | |
497 } | 506 } |
498 } | 507 } |
499 | 508 |
500 | 509 |
501 void PageSpace::PrintToJSONObject(JSONObject* object) { | 510 void PageSpace::PrintToJSONObject(JSONObject* object) { |
502 Isolate* isolate = Isolate::Current(); | 511 Isolate* isolate = Isolate::Current(); |
503 ASSERT(isolate != NULL); | 512 ASSERT(isolate != NULL); |
504 JSONObject space(object, "old"); | 513 JSONObject space(object, "old"); |
505 space.AddProperty("type", "HeapSpace"); | 514 space.AddProperty("type", "HeapSpace"); |
506 space.AddProperty("id", "heaps/old"); | 515 space.AddProperty("id", "heaps/old"); |
(...skipping 40 matching lines...)
547 heap_map.AddProperty("unit_size_bytes", | 556 heap_map.AddProperty("unit_size_bytes", |
548 static_cast<intptr_t>(kObjectAlignment)); | 557 static_cast<intptr_t>(kObjectAlignment)); |
549 heap_map.AddProperty("page_size_bytes", kPageSizeInWords * kWordSize); | 558 heap_map.AddProperty("page_size_bytes", kPageSizeInWords * kWordSize); |
550 { | 559 { |
551 JSONObject class_list(&heap_map, "class_list"); | 560 JSONObject class_list(&heap_map, "class_list"); |
552 isolate->class_table()->PrintToJSONObject(&class_list); | 561 isolate->class_table()->PrintToJSONObject(&class_list); |
553 } | 562 } |
554 { | 563 { |
555 // "pages" is an array [page0, page1, ..., pageN], each page of the form | 564 // "pages" is an array [page0, page1, ..., pageN], each page of the form |
556 // {"object_start": "0x...", "objects": [size, class id, size, ...]} | 565 // {"object_start": "0x...", "objects": [size, class id, size, ...]} |
| 566 // TODO(19445): Use ExclusivePageIterator once HeapMap supports large pages. |
557 MutexLocker ml(pages_lock_); | 567 MutexLocker ml(pages_lock_); |
| 568 MakeIterable(); |
558 NoGCScope no_gc; | 569 NoGCScope no_gc; |
559 JSONArray all_pages(&heap_map, "pages"); | 570 JSONArray all_pages(&heap_map, "pages"); |
560 for (HeapPage* page = pages_; page != NULL; page = page->next()) { | 571 for (HeapPage* page = pages_; page != NULL; page = page->next()) { |
561 JSONObject page_container(&all_pages); | 572 JSONObject page_container(&all_pages); |
562 page_container.AddPropertyF("object_start", | 573 page_container.AddPropertyF("object_start", |
563 "0x%" Px "", page->object_start()); | 574 "0x%" Px "", page->object_start()); |
564 JSONArray page_map(&page_container, "objects"); | 575 JSONArray page_map(&page_container, "objects"); |
565 HeapMapAsJSONVisitor printer(&page_map); | 576 HeapMapAsJSONVisitor printer(&page_map); |
566 page->VisitObjects(&printer); | 577 page->VisitObjects(&printer); |
567 } | 578 } |
(...skipping 87 matching lines...)
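For context on the heap-map hunk above: each page's "objects" array alternates size and class id, with sizes apparently in multiples of the "unit_size_bytes" (kObjectAlignment) property emitted earlier. A small sketch of a consumer decoding one page's array (the sample values are made up):

#include <cstdio>
#include <vector>

int main() {
  const int unit_size_bytes = 16;            // from "unit_size_bytes" (illustrative)
  std::vector<int> objects = {2, 42, 5, 7};  // [size, cid, size, cid, ...]
  for (size_t i = 0; i + 1 < objects.size(); i += 2) {
    std::printf("object: %d bytes, cid %d\n",
                objects[i] * unit_size_bytes, objects[i + 1]);
  }
  return 0;
}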
655 | 666 |
656 // Mark all reachable old-gen objects. | 667 // Mark all reachable old-gen objects. |
657 bool collect_code = FLAG_collect_code && ShouldCollectCode(); | 668 bool collect_code = FLAG_collect_code && ShouldCollectCode(); |
658 GCMarker marker(heap_); | 669 GCMarker marker(heap_); |
659 marker.MarkObjects(isolate, this, invoke_api_callbacks, collect_code); | 670 marker.MarkObjects(isolate, this, invoke_api_callbacks, collect_code); |
660 usage_.used_in_words = marker.marked_words(); | 671 usage_.used_in_words = marker.marked_words(); |
661 | 672 |
662 int64_t mid1 = OS::GetCurrentTimeMicros(); | 673 int64_t mid1 = OS::GetCurrentTimeMicros(); |
663 | 674 |
664 // Abandon the remainder of the bump allocation block. | 675 // Abandon the remainder of the bump allocation block. |
| 676 MakeIterable(); |
665 bump_top_ = 0; | 677 bump_top_ = 0; |
666 bump_end_ = 0; | 678 bump_end_ = 0; |
667 // Reset the freelists and setup sweeping. | 679 // Reset the freelists and setup sweeping. |
668 freelist_[HeapPage::kData].Reset(); | 680 freelist_[HeapPage::kData].Reset(); |
669 freelist_[HeapPage::kExecutable].Reset(); | 681 freelist_[HeapPage::kExecutable].Reset(); |
670 | 682 |
671 int64_t mid2 = OS::GetCurrentTimeMicros(); | 683 int64_t mid2 = OS::GetCurrentTimeMicros(); |
672 int64_t mid3 = 0; | 684 int64_t mid3 = 0; |
673 | 685 |
674 { | 686 { |
675 GCSweeper sweeper(heap_); | 687 GCSweeper sweeper; |
676 | 688 |
677 // During stop-the-world phases we should use bulk lock when adding elements | 689 // During stop-the-world phases we should use bulk lock when adding elements |
678 // to the free list. | 690 // to the free list. |
679 MutexLocker mld(freelist_[HeapPage::kData].mutex()); | 691 MutexLocker mld(freelist_[HeapPage::kData].mutex()); |
680 MutexLocker mle(freelist_[HeapPage::kExecutable].mutex()); | 692 MutexLocker mle(freelist_[HeapPage::kExecutable].mutex()); |
681 | 693 |
682 // Large and executable pages are always swept immediately. | 694 // Large and executable pages are always swept immediately. |
683 HeapPage* prev_page = NULL; | 695 HeapPage* prev_page = NULL; |
684 HeapPage* page = large_pages_; | 696 HeapPage* page = large_pages_; |
685 while (page != NULL) { | 697 while (page != NULL) { |
(...skipping 119 matching lines...)
805 } | 817 } |
806 } | 818 } |
807 bump_top_ = reinterpret_cast<uword>(block); | 819 bump_top_ = reinterpret_cast<uword>(block); |
808 bump_end_ = bump_top_ + block_size; | 820 bump_end_ = bump_top_ + block_size; |
809 remaining = block_size; | 821 remaining = block_size; |
810 } | 822 } |
811 ASSERT(remaining >= size); | 823 ASSERT(remaining >= size); |
812 uword result = bump_top_; | 824 uword result = bump_top_; |
813 bump_top_ += size; | 825 bump_top_ += size; |
814 usage_.used_in_words += size >> kWordSizeLog2; | 826 usage_.used_in_words += size >> kWordSizeLog2; |
815 remaining -= size; | 827 // Note: Remaining block is unwalkable until MakeIterable is called. |
816 if (remaining > 0) { | 828 #ifdef DEBUG |
817 FreeListElement::AsElement(bump_top_, remaining); | 829 if (bump_top_ < bump_end_) { |
| 830 // Fail fast if we try to walk the remaining block. |
| 831 COMPILE_ASSERT(kIllegalCid == 0); |
| 832 *reinterpret_cast<uword*>(bump_top_) = 0; |
818 } | 833 } |
| 834 #endif // DEBUG |
819 return result; | 835 return result; |
820 } | 836 } |
821 | 837 |
822 | 838 |
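The TryAllocateDataBumpInternal() hunk above drops the old per-allocation stamping of the remainder (the `remaining`/FreeListElement::AsElement bookkeeping) and instead leaves the tail unwalkable until MakeIterable() runs, adding a DEBUG-only write of 0 so the tail's first word decodes as kIllegalCid and a stray walker fails fast. A standalone sketch of the resulting fast path (illustrative names, not the VM's API):

#include <cassert>
#include <cstdint>

struct BumpRegion {
  uintptr_t top;  // next free address (bump_top_)
  uintptr_t end;  // first address past the block (bump_end_)
};

// Returns the start of a size-byte allocation, or 0 if the block is
// exhausted and the caller must acquire a fresh one.
uintptr_t TryAllocateBump(BumpRegion* r, uintptr_t size) {
  if (r->end - r->top < size) {
    return 0;
  }
  uintptr_t result = r->top;
  r->top += size;
#ifndef NDEBUG
  if (r->top < r->end) {
    // Poison the unstamped tail so a stray heap walk fails fast.
    *reinterpret_cast<uintptr_t*>(r->top) = 0;
  }
#endif
  return result;
}

int main() {
  alignas(8) static uint8_t block[64];
  BumpRegion r = {reinterpret_cast<uintptr_t>(block),
                  reinterpret_cast<uintptr_t>(block) + sizeof(block)};
  uintptr_t a = TryAllocateBump(&r, 16);  // fits; 48 bytes remain
  uintptr_t b = TryAllocateBump(&r, 64);  // does not fit
  assert(a != 0 && b == 0);
  (void)a; (void)b;
  return 0;
}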
823 uword PageSpace::TryAllocateDataBump(intptr_t size, | 839 uword PageSpace::TryAllocateDataBump(intptr_t size, |
824 GrowthPolicy growth_policy) { | 840 GrowthPolicy growth_policy) { |
825 return TryAllocateDataBumpInternal(size, growth_policy, false); | 841 return TryAllocateDataBumpInternal(size, growth_policy, false); |
826 } | 842 } |
827 | 843 |
828 | 844 |
(...skipping 142 matching lines...)
971 return 0; | 987 return 0; |
972 } else { | 988 } else { |
973 ASSERT(total_time >= gc_time); | 989 ASSERT(total_time >= gc_time); |
974 int result = static_cast<int>((static_cast<double>(gc_time) / | 990 int result = static_cast<int>((static_cast<double>(gc_time) /
975 static_cast<double>(total_time)) * 100); | 991 static_cast<double>(total_time)) * 100); |
976 return result; | 992 return result; |
977 } | 993 } |
978 } | 994 } |
979 | 995 |
980 } // namespace dart | 996 } // namespace dart |