Chromium Code Reviews

Side by Side Diff: src/heap/heap.cc

Issue 1535723002: [heap] Use HashMap as scratchpad backing store (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Revive the found counter on the AllocationSite Created 4 years, 11 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/heap.h" 5 #include "src/heap/heap.h"
6 6
7 #include "src/accessors.h" 7 #include "src/accessors.h"
8 #include "src/api.h" 8 #include "src/api.h"
9 #include "src/ast/scopeinfo.h" 9 #include "src/ast/scopeinfo.h"
10 #include "src/base/bits.h" 10 #include "src/base/bits.h"
(...skipping 132 matching lines...)
143 memory_reducer_(nullptr), 143 memory_reducer_(nullptr),
144 object_stats_(nullptr), 144 object_stats_(nullptr),
145 scavenge_job_(nullptr), 145 scavenge_job_(nullptr),
146 idle_scavenge_observer_(nullptr), 146 idle_scavenge_observer_(nullptr),
147 full_codegen_bytes_generated_(0), 147 full_codegen_bytes_generated_(0),
148 crankshaft_codegen_bytes_generated_(0), 148 crankshaft_codegen_bytes_generated_(0),
149 new_space_allocation_counter_(0), 149 new_space_allocation_counter_(0),
150 old_generation_allocation_counter_(0), 150 old_generation_allocation_counter_(0),
151 old_generation_size_at_last_gc_(0), 151 old_generation_size_at_last_gc_(0),
152 gcs_since_last_deopt_(0), 152 gcs_since_last_deopt_(0),
153 allocation_sites_scratchpad_length_(0), 153 global_pretenuring_feedback_(nullptr),
154 ring_buffer_full_(false), 154 ring_buffer_full_(false),
155 ring_buffer_end_(0), 155 ring_buffer_end_(0),
156 promotion_queue_(this), 156 promotion_queue_(this),
157 configured_(false), 157 configured_(false),
158 current_gc_flags_(Heap::kNoGCFlags), 158 current_gc_flags_(Heap::kNoGCFlags),
159 current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags), 159 current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
160 external_string_table_(this), 160 external_string_table_(this),
161 chunks_queued_for_free_(NULL), 161 chunks_queued_for_free_(NULL),
162 concurrent_unmapping_tasks_active_(0), 162 concurrent_unmapping_tasks_active_(0),
163 pending_unmapping_tasks_semaphore_(0), 163 pending_unmapping_tasks_semaphore_(0),
(...skipping 337 matching lines...)
501 501
502 void Heap::RepairFreeListsAfterDeserialization() { 502 void Heap::RepairFreeListsAfterDeserialization() {
503 PagedSpaces spaces(this); 503 PagedSpaces spaces(this);
504 for (PagedSpace* space = spaces.next(); space != NULL; 504 for (PagedSpace* space = spaces.next(); space != NULL;
505 space = spaces.next()) { 505 space = spaces.next()) {
506 space->RepairFreeListsAfterDeserialization(); 506 space->RepairFreeListsAfterDeserialization();
507 } 507 }
508 } 508 }
509 509
510 510
511 bool Heap::ProcessPretenuringFeedback() { 511 void Heap::MergeAllocationSitePretenuringFeedback(
512 const HashMap& local_pretenuring_feedback) {
513 AllocationSite* site = nullptr;
514 for (HashMap::Entry* local_entry = local_pretenuring_feedback.Start();
515 local_entry != nullptr;
516 local_entry = local_pretenuring_feedback.Next(local_entry)) {
517 site = reinterpret_cast<AllocationSite*>(local_entry->key);
518 MapWord map_word = site->map_word();
519 if (map_word.IsForwardingAddress()) {
520 site = AllocationSite::cast(map_word.ToForwardingAddress());
521 }
522 DCHECK(site->IsAllocationSite());
523 int value =
524 static_cast<int>(reinterpret_cast<intptr_t>(local_entry->value));
525 DCHECK_GT(value, 0);
526
527 {
528 // TODO(mlippautz): For parallel processing we need synchronization here.
529 if (site->IncrementMementoFoundCount(value)) {
530 global_pretenuring_feedback_->LookupOrInsert(
531 site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
532 }
533 }
534 }
535 }
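Note: the new MergeAllocationSitePretenuringFeedback() above folds a thread-local feedback HashMap into the heap's global one, resolving forwarding addresses first because a scavenge may have moved the AllocationSite. Below is a minimal standalone sketch of the merge step, assuming std::unordered_map and an integer id as stand-ins for V8's HashMap and AllocationSite* (illustrative only, not V8 API). In the actual patch the count lives on the AllocationSite itself (IncrementMementoFoundCount) and the global HashMap only records which sites received feedback; the sketch keeps the count in the map purely for simplicity.

#include <cstdint>
#include <unordered_map>

using SiteId = std::uintptr_t;                        // stand-in for AllocationSite*
using FeedbackMap = std::unordered_map<SiteId, int>;  // site -> memento found count

void MergeFeedback(const FeedbackMap& local, FeedbackMap& global) {
  for (const auto& entry : local) {
    // Accumulate the per-site count; a site only has an entry once it has
    // been found at least once, matching the DCHECK_GT(value, 0) above.
    global[entry.first] += entry.second;
  }
}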
536
537
538 class Heap::PretenuringScope {
539 public:
540 explicit PretenuringScope(Heap* heap) : heap_(heap) {
541 heap_->global_pretenuring_feedback_ =
542 new HashMap(HashMap::PointersMatch, kInitialFeedbackCapacity);
543 }
544
545 ~PretenuringScope() {
546 delete heap_->global_pretenuring_feedback_;
547 heap_->global_pretenuring_feedback_ = nullptr;
548 }
549
550 private:
551 Heap* heap_;
552 };
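Note: PretenuringScope is an RAII guard that ties the lifetime of the global feedback map to a single GC cycle. A rough standalone equivalent of the pattern, using standard containers and hypothetical names (FakeHeap and PretenuringScopeSketch are not V8 types):

#include <cstdint>
#include <memory>
#include <unordered_map>

struct FakeHeap {  // hypothetical stand-in for v8::internal::Heap
  std::unique_ptr<std::unordered_map<std::uintptr_t, int>> global_pretenuring_feedback;
};

class PretenuringScopeSketch {
 public:
  explicit PretenuringScopeSketch(FakeHeap* heap) : heap_(heap) {
    // The map is created when the GC cycle starts ...
    heap_->global_pretenuring_feedback =
        std::make_unique<std::unordered_map<std::uintptr_t, int>>();
  }
  ~PretenuringScopeSketch() {
    // ... and destroyed when the cycle ends, so feedback never outlives a GC.
    heap_->global_pretenuring_feedback.reset();
  }

 private:
  FakeHeap* heap_;
};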
553
554
555 void Heap::ProcessPretenuringFeedback() {
512 bool trigger_deoptimization = false; 556 bool trigger_deoptimization = false;
513 if (FLAG_allocation_site_pretenuring) { 557 if (FLAG_allocation_site_pretenuring) {
514 int tenure_decisions = 0; 558 int tenure_decisions = 0;
515 int dont_tenure_decisions = 0; 559 int dont_tenure_decisions = 0;
516 int allocation_mementos_found = 0; 560 int allocation_mementos_found = 0;
517 int allocation_sites = 0; 561 int allocation_sites = 0;
518 int active_allocation_sites = 0; 562 int active_allocation_sites = 0;
519 563
520 // If the scratchpad overflowed, we have to iterate over the allocation 564 AllocationSite* site = nullptr;
521 // sites list. 565
522 // TODO(hpayer): We iterate over the whole list of allocation sites when 566 // Step 1: Digest feedback for recorded allocation sites.
523 // we grew to the maximum semi-space size to deopt maybe tenured 567 bool maximum_size_scavenge = MaximumSizeScavenge();
524 // allocation sites. We could hold the maybe tenured allocation sites 568 for (HashMap::Entry* e = global_pretenuring_feedback_->Start();
525 // in a seperate data structure if this is a performance problem. 569 e != nullptr; e = global_pretenuring_feedback_->Next(e)) {
570 site = reinterpret_cast<AllocationSite*>(e->key);
571 int found_count = site->memento_found_count();
572 // The fact that we have an entry in the storage means that we've found
573 // the site at least once.
574 DCHECK_GT(found_count, 0);
575 DCHECK(site->IsAllocationSite());
576 allocation_sites++;
577 active_allocation_sites++;
578 allocation_mementos_found += found_count;
579 if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
580 trigger_deoptimization = true;
581 }
582 if (site->GetPretenureMode() == TENURED) {
583 tenure_decisions++;
584 } else {
585 dont_tenure_decisions++;
586 }
587 }
588
589 // Step 2: Deopt maybe tenured allocation sites if necessary.
526 bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites(); 590 bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
527 bool use_scratchpad = 591 if (deopt_maybe_tenured) {
528 allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize && 592 Object* list_element = allocation_sites_list();
529 !deopt_maybe_tenured; 593 while (list_element->IsAllocationSite()) {
530 594 site = AllocationSite::cast(list_element);
531 int i = 0; 595 DCHECK(site->IsAllocationSite());
532 Object* list_element = allocation_sites_list(); 596 allocation_sites++;
533 bool maximum_size_scavenge = MaximumSizeScavenge(); 597 if (site->IsMaybeTenure()) {
534 while (use_scratchpad ? i < allocation_sites_scratchpad_length_ 598 site->set_deopt_dependent_code(true);
535 : list_element->IsAllocationSite()) {
536 AllocationSite* site =
537 use_scratchpad
538 ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
539 : AllocationSite::cast(list_element);
540 allocation_mementos_found += site->memento_found_count();
541 if (site->memento_found_count() > 0) {
542 active_allocation_sites++;
543 if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
544 trigger_deoptimization = true; 599 trigger_deoptimization = true;
545 } 600 }
546 if (site->GetPretenureMode() == TENURED) {
547 tenure_decisions++;
548 } else {
549 dont_tenure_decisions++;
550 }
551 allocation_sites++;
552 }
553
554 if (deopt_maybe_tenured && site->IsMaybeTenure()) {
555 site->set_deopt_dependent_code(true);
556 trigger_deoptimization = true;
557 }
558
559 if (use_scratchpad) {
560 i++;
561 } else {
562 list_element = site->weak_next(); 601 list_element = site->weak_next();
563 } 602 }
564 } 603 }
565 604
566 if (trigger_deoptimization) { 605 if (trigger_deoptimization) {
567 isolate_->stack_guard()->RequestDeoptMarkedAllocationSites(); 606 isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
568 } 607 }
569 608
570 FlushAllocationSitesScratchpad();
571
572 if (FLAG_trace_pretenuring_statistics && 609 if (FLAG_trace_pretenuring_statistics &&
573 (allocation_mementos_found > 0 || tenure_decisions > 0 || 610 (allocation_mementos_found > 0 || tenure_decisions > 0 ||
574 dont_tenure_decisions > 0)) { 611 dont_tenure_decisions > 0)) {
575 PrintF( 612 PrintIsolate(isolate(),
576 "GC: (mode, #visited allocation sites, #active allocation sites, " 613 "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
577 "#mementos, #tenure decisions, #donttenure decisions) " 614 "active_sites=%d "
578 "(%s, %d, %d, %d, %d, %d)\n", 615 "mementos=%d tenured=%d not_tenured=%d\n",
579 use_scratchpad ? "use scratchpad" : "use list", allocation_sites, 616 deopt_maybe_tenured ? 1 : 0, allocation_sites,
580 active_allocation_sites, allocation_mementos_found, tenure_decisions, 617 active_allocation_sites, allocation_mementos_found,
581 dont_tenure_decisions); 618 tenure_decisions, dont_tenure_decisions);
582 } 619 }
583 } 620 }
584 return trigger_deoptimization;
585 } 621 }
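Note: the rewritten ProcessPretenuringFeedback() is now two-phase: step 1 digests only the sites that actually recorded feedback (the HashMap entries), and step 2 walks the full weak allocation-site list only when maybe-tenured sites must be deoptimized. Below is a simplified, self-contained sketch of step 1's bookkeeping; the fixed tenure_threshold is a placeholder, since the real decision in AllocationSite::DigestPretenuringFeedback is based on the found/created memento ratio.

#include <cstdint>
#include <unordered_map>

struct DigestStats { int visited = 0; int tenured = 0; int not_tenured = 0; };

DigestStats DigestFeedback(
    const std::unordered_map<std::uintptr_t, int>& feedback,  // site -> found count
    int tenure_threshold) {
  DigestStats stats;
  // Only sites that recorded feedback appear in the map, so the common case
  // never touches the full weak allocation-site list.
  for (const auto& entry : feedback) {
    stats.visited++;
    if (entry.second >= tenure_threshold) {
      stats.tenured++;      // would flip the site to TENURED
    } else {
      stats.not_tenured++;  // site keeps its current decision
    }
  }
  return stats;
}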
586 622
587 623
588 void Heap::DeoptMarkedAllocationSites() { 624 void Heap::DeoptMarkedAllocationSites() {
589 // TODO(hpayer): If iterating over the allocation sites list becomes a 625 // TODO(hpayer): If iterating over the allocation sites list becomes a
590 // performance issue, use a cache heap data structure instead (similar to the 626 // performance issue, use a cache data structure in heap instead.
591 // allocation sites scratchpad).
592 Object* list_element = allocation_sites_list(); 627 Object* list_element = allocation_sites_list();
593 while (list_element->IsAllocationSite()) { 628 while (list_element->IsAllocationSite()) {
594 AllocationSite* site = AllocationSite::cast(list_element); 629 AllocationSite* site = AllocationSite::cast(list_element);
595 if (site->deopt_dependent_code()) { 630 if (site->deopt_dependent_code()) {
596 site->dependent_code()->MarkCodeForDeoptimization( 631 site->dependent_code()->MarkCodeForDeoptimization(
597 isolate_, DependentCode::kAllocationSiteTenuringChangedGroup); 632 isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
598 site->set_deopt_dependent_code(false); 633 site->set_deopt_dependent_code(false);
599 } 634 }
600 list_element = site->weak_next(); 635 list_element = site->weak_next();
601 } 636 }
(...skipping 653 matching lines...)
1255 1290
1256 int start_new_space_size = Heap::new_space()->SizeAsInt(); 1291 int start_new_space_size = Heap::new_space()->SizeAsInt();
1257 1292
1258 if (IsHighSurvivalRate()) { 1293 if (IsHighSurvivalRate()) {
1259 // We speed up the incremental marker if it is running so that it 1294 // We speed up the incremental marker if it is running so that it
1260 // does not fall behind the rate of promotion, which would cause a 1295 // does not fall behind the rate of promotion, which would cause a
1261 // constantly growing old space. 1296 // constantly growing old space.
1262 incremental_marking()->NotifyOfHighPromotionRate(); 1297 incremental_marking()->NotifyOfHighPromotionRate();
1263 } 1298 }
1264 1299
1265 if (collector == MARK_COMPACTOR) { 1300 {
1266 UpdateOldGenerationAllocationCounter(); 1301 Heap::PretenuringScope pretenuring_scope(this);
1267 // Perform mark-sweep with optional compaction. 1302
1268 MarkCompact(); 1303 if (collector == MARK_COMPACTOR) {
1269 old_gen_exhausted_ = false; 1304 UpdateOldGenerationAllocationCounter();
1270 old_generation_size_configured_ = true; 1305 // Perform mark-sweep with optional compaction.
1271 // This should be updated before PostGarbageCollectionProcessing, which can 1306 MarkCompact();
1272 // cause another GC. Take into account the objects promoted during GC. 1307 old_gen_exhausted_ = false;
1273 old_generation_allocation_counter_ += 1308 old_generation_size_configured_ = true;
1274 static_cast<size_t>(promoted_objects_size_); 1309 // This should be updated before PostGarbageCollectionProcessing, which
1275 old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects(); 1310 // can cause another GC. Take into account the objects promoted during GC.
1276 } else { 1311 old_generation_allocation_counter_ +=
1277 Scavenge(); 1312 static_cast<size_t>(promoted_objects_size_);
1313 old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
1314 } else {
1315 Scavenge();
1316 }
1317
1318 ProcessPretenuringFeedback();
1278 } 1319 }
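Note: PerformGarbageCollection() now wraps the collection and the feedback digest in a PretenuringScope, so ProcessPretenuringFeedback() runs while the map is still alive. A usage sketch, reusing the hypothetical FakeHeap and PretenuringScopeSketch types from the sketch after the PretenuringScope class above; the commented calls only mark where V8's real MarkCompact/Scavenge/ProcessPretenuringFeedback steps happen.

void PerformGarbageCollectionSketch(FakeHeap* heap, bool is_mark_compactor) {
  PretenuringScopeSketch scope(heap);  // feedback map allocated here
  if (is_mark_compactor) {
    // MarkCompact();
  } else {
    // Scavenge();
  }
  // ProcessPretenuringFeedback();     // map is still alive at this point
}                                      // map destroyed when the scope ends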
1279 1320
1280 ProcessPretenuringFeedback();
1281 UpdateSurvivalStatistics(start_new_space_size); 1321 UpdateSurvivalStatistics(start_new_space_size);
1282 ConfigureInitialOldGenerationSize(); 1322 ConfigureInitialOldGenerationSize();
1283 1323
1284 isolate_->counters()->objs_since_last_young()->Set(0); 1324 isolate_->counters()->objs_since_last_young()->Set(0);
1285 1325
1286 if (collector != SCAVENGER) { 1326 if (collector != SCAVENGER) {
1287 // Callbacks that fire after this point might trigger nested GCs and 1327 // Callbacks that fire after this point might trigger nested GCs and
1288 // restart incremental marking, the assertion can't be moved down. 1328 // restart incremental marking, the assertion can't be moved down.
1289 DCHECK(incremental_marking()->IsStopped()); 1329 DCHECK(incremental_marking()->IsStopped());
1290 1330
(...skipping 532 matching lines...)
1823 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) { 1863 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1824 DisallowHeapAllocation no_allocation_scope; 1864 DisallowHeapAllocation no_allocation_scope;
1825 Object* cur = allocation_sites_list(); 1865 Object* cur = allocation_sites_list();
1826 bool marked = false; 1866 bool marked = false;
1827 while (cur->IsAllocationSite()) { 1867 while (cur->IsAllocationSite()) {
1828 AllocationSite* casted = AllocationSite::cast(cur); 1868 AllocationSite* casted = AllocationSite::cast(cur);
1829 if (casted->GetPretenureMode() == flag) { 1869 if (casted->GetPretenureMode() == flag) {
1830 casted->ResetPretenureDecision(); 1870 casted->ResetPretenureDecision();
1831 casted->set_deopt_dependent_code(true); 1871 casted->set_deopt_dependent_code(true);
1832 marked = true; 1872 marked = true;
1873 RemoveAllocationSitePretenuringFeedback(casted);
1833 } 1874 }
1834 cur = casted->weak_next(); 1875 cur = casted->weak_next();
1835 } 1876 }
1836 if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites(); 1877 if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1837 } 1878 }
1838 1879
1839 1880
1840 void Heap::EvaluateOldSpaceLocalPretenuring( 1881 void Heap::EvaluateOldSpaceLocalPretenuring(
1841 uint64_t size_of_objects_before_gc) { 1882 uint64_t size_of_objects_before_gc) {
1842 uint64_t size_of_objects_after_gc = SizeOfObjects(); 1883 uint64_t size_of_objects_after_gc = SizeOfObjects();
(...skipping 985 matching lines...)
2828 2869
2829 set_weak_stack_trace_list(Smi::FromInt(0)); 2870 set_weak_stack_trace_list(Smi::FromInt(0));
2830 2871
2831 set_noscript_shared_function_infos(Smi::FromInt(0)); 2872 set_noscript_shared_function_infos(Smi::FromInt(0));
2832 2873
2833 // Will be filled in by Interpreter::Initialize(). 2874 // Will be filled in by Interpreter::Initialize().
2834 set_interpreter_table( 2875 set_interpreter_table(
2835 *interpreter::Interpreter::CreateUninitializedInterpreterTable( 2876 *interpreter::Interpreter::CreateUninitializedInterpreterTable(
2836 isolate())); 2877 isolate()));
2837 2878
2838 set_allocation_sites_scratchpad(
2839 *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
2840 InitializeAllocationSitesScratchpad();
2841
2842 // Initialize keyed lookup cache. 2879 // Initialize keyed lookup cache.
2843 isolate_->keyed_lookup_cache()->Clear(); 2880 isolate_->keyed_lookup_cache()->Clear();
2844 2881
2845 // Initialize context slot cache. 2882 // Initialize context slot cache.
2846 isolate_->context_slot_cache()->Clear(); 2883 isolate_->context_slot_cache()->Clear();
2847 2884
2848 // Initialize descriptor cache. 2885 // Initialize descriptor cache.
2849 isolate_->descriptor_lookup_cache()->Clear(); 2886 isolate_->descriptor_lookup_cache()->Clear();
2850 2887
2851 // Initialize compilation cache. 2888 // Initialize compilation cache.
2852 isolate_->compilation_cache()->Clear(); 2889 isolate_->compilation_cache()->Clear();
2853 } 2890 }
2854 2891
2855 2892
2856 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) { 2893 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2857 switch (root_index) { 2894 switch (root_index) {
2858 case kStoreBufferTopRootIndex: 2895 case kStoreBufferTopRootIndex:
2859 case kNumberStringCacheRootIndex: 2896 case kNumberStringCacheRootIndex:
2860 case kInstanceofCacheFunctionRootIndex: 2897 case kInstanceofCacheFunctionRootIndex:
2861 case kInstanceofCacheMapRootIndex: 2898 case kInstanceofCacheMapRootIndex:
2862 case kInstanceofCacheAnswerRootIndex: 2899 case kInstanceofCacheAnswerRootIndex:
2863 case kCodeStubsRootIndex: 2900 case kCodeStubsRootIndex:
2864 case kNonMonomorphicCacheRootIndex: 2901 case kNonMonomorphicCacheRootIndex:
2865 case kPolymorphicCodeCacheRootIndex: 2902 case kPolymorphicCodeCacheRootIndex:
2866 case kEmptyScriptRootIndex: 2903 case kEmptyScriptRootIndex:
2867 case kSymbolRegistryRootIndex: 2904 case kSymbolRegistryRootIndex:
2868 case kScriptListRootIndex: 2905 case kScriptListRootIndex:
2869 case kMaterializedObjectsRootIndex: 2906 case kMaterializedObjectsRootIndex:
2870 case kAllocationSitesScratchpadRootIndex:
2871 case kMicrotaskQueueRootIndex: 2907 case kMicrotaskQueueRootIndex:
2872 case kDetachedContextsRootIndex: 2908 case kDetachedContextsRootIndex:
2873 case kWeakObjectToCodeTableRootIndex: 2909 case kWeakObjectToCodeTableRootIndex:
2874 case kRetainedMapsRootIndex: 2910 case kRetainedMapsRootIndex:
2875 case kNoScriptSharedFunctionInfosRootIndex: 2911 case kNoScriptSharedFunctionInfosRootIndex:
2876 case kWeakStackTraceListRootIndex: 2912 case kWeakStackTraceListRootIndex:
2877 // Smi values 2913 // Smi values
2878 #define SMI_ENTRY(type, name, Name) case k##Name##RootIndex: 2914 #define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
2879 SMI_ROOT_LIST(SMI_ENTRY) 2915 SMI_ROOT_LIST(SMI_ENTRY)
2880 #undef SMI_ENTRY 2916 #undef SMI_ENTRY
(...skipping 28 matching lines...)
2909 2945
2910 void Heap::FlushNumberStringCache() { 2946 void Heap::FlushNumberStringCache() {
2911 // Flush the number to string cache. 2947 // Flush the number to string cache.
2912 int len = number_string_cache()->length(); 2948 int len = number_string_cache()->length();
2913 for (int i = 0; i < len; i++) { 2949 for (int i = 0; i < len; i++) {
2914 number_string_cache()->set_undefined(i); 2950 number_string_cache()->set_undefined(i);
2915 } 2951 }
2916 } 2952 }
2917 2953
2918 2954
2919 void Heap::FlushAllocationSitesScratchpad() {
2920 for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
2921 allocation_sites_scratchpad()->set_undefined(i);
2922 }
2923 allocation_sites_scratchpad_length_ = 0;
2924 }
2925
2926
2927 void Heap::InitializeAllocationSitesScratchpad() {
2928 DCHECK(allocation_sites_scratchpad()->length() ==
2929 kAllocationSiteScratchpadSize);
2930 for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
2931 allocation_sites_scratchpad()->set_undefined(i);
2932 }
2933 }
2934
2935
2936 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
2937 ScratchpadSlotMode mode) {
2938 if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
2939 // We cannot use the normal write-barrier because slots need to be
2940 // recorded with non-incremental marking as well. We have to explicitly
2941 // record the slot to take evacuation candidates into account.
2942 allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
2943 site, SKIP_WRITE_BARRIER);
2944 Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
2945 allocation_sites_scratchpad_length_);
2946
2947 if (mode == RECORD_SCRATCHPAD_SLOT) {
2948 // We need to allow slots buffer overflow here since the evacuation
2949 // candidates are not part of the global list of old space pages and
2950 // releasing an evacuation candidate due to a slots buffer overflow
2951 // results in lost pages.
2952 mark_compact_collector()->ForceRecordSlot(allocation_sites_scratchpad(),
2953 slot, *slot);
2954 }
2955 allocation_sites_scratchpad_length_++;
2956 }
2957 }
2958
2959
2960
2961 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) { 2955 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
2962 return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]); 2956 return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
2963 } 2957 }
2964 2958
2965 2959
2966 Heap::RootListIndex Heap::RootIndexForFixedTypedArray( 2960 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
2967 ExternalArrayType array_type) { 2961 ExternalArrayType array_type) {
2968 switch (array_type) { 2962 switch (array_type) {
2969 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \ 2963 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
2970 case kExternal##Type##Array: \ 2964 case kExternal##Type##Array: \
(...skipping 3264 matching lines...)
6235 } 6229 }
6236 6230
6237 6231
6238 // static 6232 // static
6239 int Heap::GetStaticVisitorIdForMap(Map* map) { 6233 int Heap::GetStaticVisitorIdForMap(Map* map) {
6240 return StaticVisitorBase::GetVisitorId(map); 6234 return StaticVisitorBase::GetVisitorId(map);
6241 } 6235 }
6242 6236
6243 } // namespace internal 6237 } // namespace internal
6244 } // namespace v8 6238 } // namespace v8
