Chromium Code Reviews

Unified Diff: src/heap/heap.h

Issue 1301583003: Make heap.h usable without objects-inl.h header. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@local_cleanup-includes-heap-3
Patch Set: Simplify scavenger dispatch. Created 5 years, 4 months ago
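
The recurring move in this patch: root accessor bodies in heap.h that call type::cast(), and therefore drag objects-inl.h into every includer, become bare inline declarations; the bodies move out to an -inl header that only the translation units which actually call them include. A minimal sketch of the split, using hypothetical Foo/foo.h names (the real definitions presumably land in heap-inl.h):

    // foo.h: declaration only. Files that merely need the class layout
    // can include this without pulling in any heavyweight headers.
    class Foo {
     public:
      inline int value();  // Body lives in foo-inl.h.

     private:
      int raw_value_ = 0;
    };

    // foo-inl.h: the definition, plus whatever includes the body needs.
    inline int Foo::value() { return raw_value_; }

Callers such as heap.cc then include the -inl header; everyone else gets a lighter heap.h and shorter rebuilds.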
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef V8_HEAP_HEAP_H_
 #define V8_HEAP_HEAP_H_

 #include <cmath>
 #include <map>

(...skipping 877 matching lines...)
   void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback);

   void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
                              GCType gc_type_filter, bool pass_isolate = true);
   void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback);

   // Heap root getters. We have versions with and without type::cast() here.
   // You can't use type::cast during GC because the assert fails.
   // TODO(1490): Try removing the unchecked accessors, now that GC marking does
   // not corrupt the map.
 #define ROOT_ACCESSOR(type, name, camel_name) \
-  type* name() { return type::cast(roots_[k##camel_name##RootIndex]); } \
+  inline type* name(); \
   type* raw_unchecked_##name() { \
     return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
   }
   ROOT_LIST(ROOT_ACCESSOR)
 #undef ROOT_ACCESSOR

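ROOT_LIST/ROOT_ACCESSOR above is an X-macro pair: ROOT_LIST applies a caller-supplied macro to every (type, name, CamelName) root tuple, so each redefinition of ROOT_ACCESSOR stamps out one member per root. A self-contained toy version of the pattern (TOY_ROOT_LIST, ToyHeap, and the two roots are invented for illustration; they are not V8's actual lists):

    #include <cassert>

    // A toy root list; V(type, name, CamelName) is applied to each entry.
    #define TOY_ROOT_LIST(V) \
      V(int, answer, Answer) \
      V(double, ratio, Ratio)

    enum RootIndex { kAnswerRootIndex, kRatioRootIndex, kRootCount };

    struct ToyHeap {
      // Declarations stamped out per root, mirroring the new heap.h:
    #define ROOT_ACCESSOR(type, name, camel_name) inline type name();
      TOY_ROOT_LIST(ROOT_ACCESSOR)
    #undef ROOT_ACCESSOR

      double roots_[kRootCount] = {42.0, 0.5};
    };

    // Definitions, as they would appear in a heap-inl.h-style header:
    #define ROOT_ACCESSOR(type, name, camel_name)                   \
      inline type ToyHeap::name() {                                 \
        return static_cast<type>(roots_[k##camel_name##RootIndex]); \
      }
    TOY_ROOT_LIST(ROOT_ACCESSOR)
    #undef ROOT_ACCESSOR

    int main() {
      ToyHeap heap;
      assert(heap.answer() == 42);
      assert(heap.ratio() == 0.5);
      return 0;
    }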
   // Utility type maps
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
-  Map* name##_map() { return Map::cast(roots_[k##Name##MapRootIndex]); }
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Map* name##_map();
   STRUCT_LIST(STRUCT_MAP_ACCESSOR)
 #undef STRUCT_MAP_ACCESSOR

-#define STRING_ACCESSOR(name, str) \
-  String* name() { return String::cast(roots_[k##name##RootIndex]); }
+#define STRING_ACCESSOR(name, str) inline String* name();
   INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
 #undef STRING_ACCESSOR

-#define SYMBOL_ACCESSOR(name) \
-  Symbol* name() { return Symbol::cast(roots_[k##name##RootIndex]); }
+#define SYMBOL_ACCESSOR(name) inline Symbol* name();
   PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
 #undef SYMBOL_ACCESSOR

-#define SYMBOL_ACCESSOR(name, varname, description) \
-  Symbol* name() { return Symbol::cast(roots_[k##name##RootIndex]); }
+#define SYMBOL_ACCESSOR(name, varname, description) inline Symbol* name();
   PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
 #undef SYMBOL_ACCESSOR

   // The hidden_string is special because it is the empty string, but does
   // not match the empty string.
   String* hidden_string() { return hidden_string_; }

   void set_native_contexts_list(Object* object) {
     native_contexts_list_ = object;
   }
(...skipping 161 matching lines...)
   void TracePathToGlobal();
 #endif

   // Callback function passed to Heap::Iterate etc. Copies an object if
   // necessary, the object might be promoted to an old space. The caller must
   // ensure the precondition that the object is (a) a heap object and (b) in
   // the heap's from space.
   static inline void ScavengePointer(HeapObject** p);
   static inline void ScavengeObject(HeapObject** p, HeapObject* object);

+  // Slow part of scavenge object.
+  static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
+
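Adding ScavengeObjectSlow right below ScavengeObject makes the fast-path/slow-path split visible in one place: the inline entry point handles the cheap common case at every call site and falls through to the outlined routine for the rest. A generic sketch of the idiom, with invented names and a forwarding-pointer check standing in for the real logic:

    // The inline fast path stays tiny so the compiler can inline it at
    // every call site; the rare, bulky work lives out of line.
    struct Object {
      Object* forwarding = nullptr;  // Set once the object has been copied.
    };

    // Outlined slow path; this sketch just "copies" by self-forwarding.
    void ScavengeSlow(Object** slot, Object* obj) {
      obj->forwarding = obj;  // Real code would copy to to-space first.
      *slot = obj->forwarding;
    }

    // Inline fast path: the already-forwarded case is one load and a store.
    inline void Scavenge(Object** slot, Object* obj) {
      if (obj->forwarding != nullptr) {
        *slot = obj->forwarding;
        return;
      }
      ScavengeSlow(slot, obj);
    }

    int main() {
      Object o;
      Object* slot = &o;
      Scavenge(&slot, &o);  // Slow path: forwards the object.
      Scavenge(&slot, &o);  // Fast path: forwarding pointer already set.
      return 0;
    }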
   enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT };

   // If an object has an AllocationMemento trailing it, return it, otherwise
   // return NULL;
   inline AllocationMemento* FindAllocationMemento(HeapObject* object);

   // An object may have an AllocationSite associated with it through a trailing
   // AllocationMemento. Its feedback should be updated when objects are found
   // in the heap.
   static inline void UpdateAllocationSiteFeedback(HeapObject* object,
(...skipping 311 matching lines...)
   bool concurrent_sweeping_enabled() { return concurrent_sweeping_enabled_; }

   inline Isolate* isolate();

   void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
   void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);

   inline bool OldGenerationAllocationLimitReached();

-  inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
-    scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
-  }
-
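The deleted DoScavengeObject body is the one glimpse of the dispatch machinery in this hunk: a table of visitor function pointers, selected by the object's map (the patch set is titled "Simplify scavenger dispatch"). A minimal sketch of that table-driven shape, with all types and the visitor_id field invented for illustration:

    #include <cstdio>

    struct Map;  // Forward declaration; completed below.

    struct HeapObject {
      Map* map;  // Every heap object points at its map (type descriptor).
    };

    struct Map {
      int visitor_id;  // Index into the visitor table, chosen per type.
    };

    using ScavengeFn = void (*)(Map*, HeapObject**, HeapObject*);

    struct VisitorTable {
      ScavengeFn visitors[2];
      ScavengeFn GetVisitor(Map* m) const { return visitors[m->visitor_id]; }
    };

    void ScavengeString(Map*, HeapObject**, HeapObject*) { std::puts("string"); }
    void ScavengeArray(Map*, HeapObject**, HeapObject*) { std::puts("array"); }

    int main() {
      VisitorTable table{{ScavengeString, ScavengeArray}};
      Map array_map{1};
      HeapObject obj{&array_map};
      HeapObject* slot = &obj;
      // Same shape as the removed body:
      //   scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
      table.GetVisitor(obj.map)(obj.map, &slot, &obj);
      return 0;
    }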
   void QueueMemoryChunkForFree(MemoryChunk* chunk);
   void FreeQueuedChunks();

   int gc_count() const { return gc_count_; }

   bool RecentIdleNotificationHappened();

   // Completely clear the Instanceof cache (to stop it keeping objects alive
   // around a GC).
   inline void CompletelyClearInstanceofCache();

   // The roots that have an index less than this are always in old space.
   static const int kOldSpaceRoots = 0x20;

-  uint32_t HashSeed() {
-    uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
-    DCHECK(FLAG_randomize_hashes || seed == 0);
-    return seed;
-  }
+  inline uint32_t HashSeed();

-  Smi* NextScriptId() {
-    int next_id = last_script_id()->value() + 1;
-    if (!Smi::IsValid(next_id) || next_id < 0) next_id = 1;
-    Smi* next_id_smi = Smi::FromInt(next_id);
-    set_last_script_id(next_id_smi);
-    return next_id_smi;
-  }
+  inline Smi* NextScriptId();

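NextScriptId's body, now moved out of the header, is worth spelling out: it bumps the last id and wraps back to 1 whenever the increment would leave the Smi range or go negative, so script ids stay valid positive Smis indefinitely. The same logic with plain ints (kMaxSmiValue is an assumed stand-in for Smi::IsValid(); the real bound depends on pointer size):

    #include <cassert>

    constexpr int kMaxSmiValue = (1 << 30) - 1;  // Assumed bound for this sketch.

    int last_script_id = 0;

    int NextScriptId() {
      int next_id = last_script_id + 1;
      // Past the Smi range (or wrapped negative): restart at 1, never 0.
      if (next_id > kMaxSmiValue || next_id < 0) next_id = 1;
      last_script_id = next_id;
      return next_id;
    }

    int main() {
      assert(NextScriptId() == 1);
      last_script_id = kMaxSmiValue;
      assert(NextScriptId() == 1);  // Wrapped around.
      return 0;
    }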
-  void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
-    DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
-    set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
-  }
-
-  void SetConstructStubDeoptPCOffset(int pc_offset) {
-    DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
-    set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
-  }
-
-  void SetGetterStubDeoptPCOffset(int pc_offset) {
-    DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
-    set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
-  }
-
-  void SetSetterStubDeoptPCOffset(int pc_offset) {
-    DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
-    set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
-  }
+  inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
+  inline void SetConstructStubDeoptPCOffset(int pc_offset);
+  inline void SetGetterStubDeoptPCOffset(int pc_offset);
+  inline void SetSetterStubDeoptPCOffset(int pc_offset);

   // For post mortem debugging.
   void RememberUnmappedPage(Address page, bool compacted);

   // Global inline caching age: it is incremented on some GCs after context
   // disposal. We use it to flush inline caches.
   int global_ic_age() { return global_ic_age_; }

   void AgeInlineCaches() {
     global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
(...skipping 276 matching lines...)
   unsigned int gc_count_;

   // For post mortem debugging.
   static const int kRememberedUnmappedPages = 128;
   int remembered_unmapped_pages_index_;
   Address remembered_unmapped_pages_[kRememberedUnmappedPages];

   // Total length of the strings we failed to flatten since the last GC.
   int unflattened_strings_length_;

 #define ROOT_ACCESSOR(type, name, camel_name) \
-  inline void set_##name(type* value) { \
-    /* The deserializer makes use of the fact that these common roots are */ \
-    /* never in new space and never on a page that is being compacted. */ \
-    DCHECK(!deserialization_complete() || \
-           RootCanBeWrittenAfterInitialization(k##camel_name##RootIndex)); \
-    DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
-    roots_[k##camel_name##RootIndex] = value; \
-  }
+  inline void set_##name(type* value);
   ROOT_LIST(ROOT_ACCESSOR)
 #undef ROOT_ACCESSOR

 #ifdef DEBUG
   // If the --gc-interval flag is set to a positive value, this
   // variable holds the value indicating the number of allocations
   // remain until the next failure and garbage collection.
   int allocation_timeout_;
 #endif  // DEBUG

(...skipping 347 matching lines...)
       Isolate* isolate, std::map<void*, size_t>& live_buffers,
       std::map<void*, size_t>& not_yet_discovered_buffers);
   void TearDownArrayBuffersHelper(
       Isolate* isolate, std::map<void*, size_t>& live_buffers,
       std::map<void*, size_t>& not_yet_discovered_buffers);

   // Record statistics before and after garbage collection.
   void ReportStatisticsBeforeGC();
   void ReportStatisticsAfterGC();

-  // Slow part of scavenge object.
-  static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
-
   // Total RegExp code ever generated
   double total_regexp_code_generated_;

   int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];

   GCTracer tracer_;

   // Creates and installs the full-sized number string cache.
   int FullSizeNumberStringCacheLength();
   // Flush the number to string cache.
(...skipping 591 matching lines...)
   DisallowHeapAllocation no_allocation;  // i.e. no gc allowed.

  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
 };
 #endif  // DEBUG
 }
 }  // namespace v8::internal

 #endif  // V8_HEAP_HEAP_H_