Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 68203029: Make number of available threads isolate-dependent and expose it to ResourceConstraints. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: address comments Created 7 years, 1 month ago
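
Context for the diff below: the recurring change in this file replaces direct reads of global flags (FLAG_concurrent_osr, FLAG_concurrent_recompilation) with per-isolate queries such as isolate()->concurrent_osr_enabled() and isolate()->num_sweeper_threads(). A minimal sketch of what such isolate-level accessors might look like follows; only the three accessor names appear in this diff, and everything else in the sketch (the max_available_threads_ member, its setter, and the enabling conditions) is an assumption based on the issue title, not code from this patch.

// Hypothetical sketch only -- not code from this patch. It illustrates the
// pattern the diff applies: global flags are consulted through per-isolate
// queries, so the answer can depend on how many threads the embedder made
// available to this isolate.
static bool FLAG_concurrent_recompilation = true;  // stand-in for the V8 flag
static bool FLAG_concurrent_osr = true;            // stand-in for the V8 flag

class Isolate {
 public:
  // Concurrent recompilation needs at least one thread besides the main one
  // (the "> 1" threshold is an assumption for this sketch).
  bool concurrent_recompilation_enabled() const {
    return FLAG_concurrent_recompilation && max_available_threads_ > 1;
  }
  // Concurrent OSR rides on the recompilation thread.
  bool concurrent_osr_enabled() const {
    return concurrent_recompilation_enabled() && FLAG_concurrent_osr;
  }
  int num_sweeper_threads() const { return num_sweeper_threads_; }
  // Assumed hook for the ResourceConstraints plumbing mentioned in the issue.
  void set_max_available_threads(int n) { max_available_threads_ = n; }

 private:
  int max_available_threads_ = 1;
  int num_sweeper_threads_ = 0;
};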
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(... 447 unchanged lines skipped ...)
 #ifdef DEBUG
   ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);

   if (FLAG_gc_verbose) Print();

   ReportStatisticsBeforeGC();
 #endif  // DEBUG

   store_buffer()->GCPrologue();

-  if (FLAG_concurrent_osr) {
+  if (isolate()->concurrent_osr_enabled()) {
     isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
   }
 }


 intptr_t Heap::SizeOfObjects() {
   intptr_t total = 0;
   AllSpaces spaces(this);
   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
     total += space->SizeOfObjects();
(... 176 unchanged lines skipped ...)
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
   // Major GC would invoke weak handle callbacks on weakly reachable
   // handles, but won't collect weakly reachable objects until next
   // major GC. Therefore if we collect aggressively and weak handle callback
   // has been invoked, we rerun major GC to release objects which become
   // garbage.
   // Note: as weak callbacks can execute arbitrary code, we cannot
   // hope that eventually there will be no weak callbacks invocations.
   // Therefore stop recollecting after several attempts.
-  if (FLAG_concurrent_recompilation) {
+  if (isolate()->concurrent_recompilation_enabled()) {
     // The optimizing compiler may be unnecessarily holding on to memory.
     DisallowHeapAllocation no_recursive_gc;
     isolate()->optimizing_compiler_thread()->Flush();
   }
   mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
                                      kReduceMemoryFootprintMask);
   isolate_->compilation_cache()->Clear();
   const int kMaxNumberOfAttempts = 7;
   const int kMinNumberOfAttempts = 2;
   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
(... 80 unchanged lines skipped ...)
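
The body of the attempt loop declared above falls in the skipped region, so it is not shown here. To make the reasoning in the preceding comments concrete, this is a hypothetical reconstruction of its shape; the pass function and its return value are placeholders, not the elided code:

// Hypothetical sketch: bounded re-collection. Weak handle callbacks run
// during a major GC and may create fresh garbage, so the heap is collected
// again a limited number of times rather than until a fixed point.
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
  // Placeholder for one aggressive mark-compact pass; assumed to report
  // whether another pass is likely to free additional memory.
  bool collected_more = PerformOneAggressivePass();
  if (attempt + 1 >= kMinNumberOfAttempts && !collected_more) break;
}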
       incremental_marking()->WorthActivating() &&
       NextGCIsLikelyToBeFull()) {
     incremental_marking()->Start();
   }

   return next_gc_likely_to_collect_more;
 }


 int Heap::NotifyContextDisposed() {
-  if (FLAG_concurrent_recompilation) {
+  if (isolate()->concurrent_recompilation_enabled()) {
     // Flush the queued recompilation tasks.
     isolate()->optimizing_compiler_thread()->Flush();
   }
   flush_monomorphic_ics_ = true;
   AgeInlineCaches();
   return ++contexts_disposed_;
 }


 void Heap::PerformScavenge() {
(... 5797 unchanged lines skipped ...)
   return old_pointer_space_->SizeOfObjects()
       + old_data_space_->SizeOfObjects()
       + code_space_->SizeOfObjects()
       + map_space_->SizeOfObjects()
       + cell_space_->SizeOfObjects()
       + property_cell_space_->SizeOfObjects()
       + lo_space_->SizeOfObjects();
 }


+bool Heap::AdvanceSweepers(int step_size) {
+  ASSERT(isolate()->num_sweeper_threads() == 0);
+  bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
+  sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
+  return sweeping_complete;
+}
+
+
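
The AdvanceSweepers helper added above is only meaningful when the isolate was configured with no concurrent sweeper threads, which is what the ASSERT on num_sweeper_threads() enforces. A hypothetical call site that makes the guard explicit (the caller and kAssumedStepSize are illustrative, not part of this patch):

// Hypothetical caller: step old-space sweeping on the main thread only when
// this isolate has no dedicated sweeper threads.
if (heap->isolate()->num_sweeper_threads() == 0) {
  // Returns true once both old spaces have been swept completely.
  bool sweeping_complete = heap->AdvanceSweepers(kAssumedStepSize);
  if (sweeping_complete) {
    // Sweeping done for this cycle; nothing further to advance.
  }
}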
 intptr_t Heap::PromotedExternalMemorySize() {
   if (amount_of_external_allocated_memory_
       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
   return amount_of_external_allocated_memory_
       - amount_of_external_allocated_memory_at_last_global_gc_;
 }


 void Heap::EnableInlineAllocation() {
   ASSERT(inline_allocation_disabled_);
(... 126 unchanged lines skipped ...)
       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
     }
   }

   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
   LOG(isolate_, IntPtrTEvent("heap-available", Available()));

   store_buffer()->SetUp();

   if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
-#ifdef DEBUG
-  relocation_mutex_locked_by_optimizer_thread_ = false;
-#endif  // DEBUG

   return true;
 }


 bool Heap::CreateHeapObjects() {
   // Create initial maps.
   if (!CreateInitialMaps()) return false;
   if (!CreateApiObjects()) return false;

(... 124 unchanged lines skipped ...)
     delete lo_space_;
     lo_space_ = NULL;
   }

   store_buffer()->TearDown();
   incremental_marking()->TearDown();

   isolate_->memory_allocator()->TearDown();

   delete relocation_mutex_;
+  relocation_mutex_ = NULL;
 }


 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
                                  GCType gc_type,
                                  bool pass_isolate) {
   ASSERT(callback != NULL);
   GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
   ASSERT(!gc_prologue_callbacks_.Contains(pair));
   return gc_prologue_callbacks_.Add(pair);
(... 1059 unchanged lines skipped ...)
     counters->size_of_CODE_AGE_##name()->Decrement(          \
         static_cast<int>(object_sizes_last_time_[index]));
   CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
 #undef ADJUST_LAST_TIME_OBJECT_COUNT

   OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
   OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
   ClearObjectStats();
 }

-
-Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
-  if (FLAG_concurrent_recompilation) {
-    heap_->relocation_mutex_->Lock();
-#ifdef DEBUG
-    heap_->relocation_mutex_locked_by_optimizer_thread_ =
-        heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
-#endif  // DEBUG
-  }
-}
-
 } }  // namespace v8::internal
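
The removed Heap::RelocationLock constructor above took relocation_mutex_ only when FLAG_concurrent_recompilation was set, with a DEBUG-only record of whether the optimizer thread was the one locking. Its replacement is not visible in this file; the following is a minimal RAII sketch of the same locking pattern rewritten against the isolate-level query used elsewhere in this patch, and is an assumption about the new shape (presumably in src/heap.h), not code from this CL:

// Hypothetical sketch: relocation lock keyed off the isolate instead of the
// global flag. Assumes relocation_mutex_ exists whenever the query is true.
class RelocationLock {
 public:
  explicit RelocationLock(Heap* heap) : heap_(heap) {
    if (heap_->isolate()->concurrent_recompilation_enabled()) {
      heap_->relocation_mutex_->Lock();
    }
  }
  ~RelocationLock() {
    if (heap_->isolate()->concurrent_recompilation_enabled()) {
      heap_->relocation_mutex_->Unlock();
    }
  }

 private:
  Heap* heap_;
};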