Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(3)

Side by Side Diff: src/heap/heap.h

Issue 1304873006: [heap] Enforce coding style decl order in {Heap} round #3. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@local_cleanup-heap-store-buffer-api
Patch Set: Rebased. Created 5 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef V8_HEAP_HEAP_H_ 5 #ifndef V8_HEAP_HEAP_H_
6 #define V8_HEAP_HEAP_H_ 6 #define V8_HEAP_HEAP_H_
7 7
8 #include <cmath> 8 #include <cmath>
9 #include <map> 9 #include <map>
10 10
(...skipping 806 matching lines...) Expand 10 before | Expand all | Expand 10 after
817 static double HeapGrowingFactor(double gc_speed, double mutator_speed); 817 static double HeapGrowingFactor(double gc_speed, double mutator_speed);
818 818
819 // Copy block of memory from src to dst. Size of block should be aligned 819 // Copy block of memory from src to dst. Size of block should be aligned
820 // by pointer size. 820 // by pointer size.
821 static inline void CopyBlock(Address dst, Address src, int byte_size); 821 static inline void CopyBlock(Address dst, Address src, int byte_size);
822 822
823 // Optimized version of memmove for blocks with pointer size aligned sizes and 823 // Optimized version of memmove for blocks with pointer size aligned sizes and
824 // pointer size aligned addresses. 824 // pointer size aligned addresses.
825 static inline void MoveBlock(Address dst, Address src, int byte_size); 825 static inline void MoveBlock(Address dst, Address src, int byte_size);
826 826
827 // Set the stack limit in the roots_ array. Some architectures generate
828 // code that looks here, because it is faster than loading from the static
829 // jslimit_/real_jslimit_ variable in the StackGuard.
830 void SetStackLimits();
831
832 // Notifies the heap that it is ok to start marking or other activities that 827 // Notifies the heap that it is ok to start marking or other activities that
833 // should not happen during deserialization. 828 // should not happen during deserialization.
834 void NotifyDeserializationComplete(); 829 void NotifyDeserializationComplete();
835 830
836 intptr_t old_generation_allocation_limit() const { 831 intptr_t old_generation_allocation_limit() const {
837 return old_generation_allocation_limit_; 832 return old_generation_allocation_limit_;
838 } 833 }
839 834
840 bool always_allocate() { return always_allocate_scope_depth_ != 0; } 835 bool always_allocate() { return always_allocate_scope_depth_ != 0; }
841 Address always_allocate_scope_depth_address() { 836 Address always_allocate_scope_depth_address() {
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after
912 } 907 }
913 } 908 }
914 909
915 inline void decrement_scan_on_scavenge_pages() { 910 inline void decrement_scan_on_scavenge_pages() {
916 scan_on_scavenge_pages_--; 911 scan_on_scavenge_pages_--;
917 if (FLAG_gc_verbose) { 912 if (FLAG_gc_verbose) {
918 PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_); 913 PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
919 } 914 }
920 } 915 }
921 916
922 // Heap root getters. We have versions with and without type::cast() here.
923 // You can't use type::cast during GC because the assert fails.
924 // TODO(1490): Try removing the unchecked accessors, now that GC marking does
925 // not corrupt the map.
926 #define ROOT_ACCESSOR(type, name, camel_name) \
927 inline type* name(); \
928 type* raw_unchecked_##name() { \
929 return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
930 }
931 ROOT_LIST(ROOT_ACCESSOR)
932 #undef ROOT_ACCESSOR
933
934 // Utility type maps
935 #define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Map* name##_map();
936 STRUCT_LIST(STRUCT_MAP_ACCESSOR)
937 #undef STRUCT_MAP_ACCESSOR
938
939 #define STRING_ACCESSOR(name, str) inline String* name();
940 INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
941 #undef STRING_ACCESSOR
942
943 #define SYMBOL_ACCESSOR(name) inline Symbol* name();
944 PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
945 #undef SYMBOL_ACCESSOR
946
947 #define SYMBOL_ACCESSOR(name, varname, description) inline Symbol* name();
948 PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
949 #undef SYMBOL_ACCESSOR
950
951 void set_native_contexts_list(Object* object) { 917 void set_native_contexts_list(Object* object) {
952 native_contexts_list_ = object; 918 native_contexts_list_ = object;
953 } 919 }
954 Object* native_contexts_list() const { return native_contexts_list_; } 920 Object* native_contexts_list() const { return native_contexts_list_; }
955 921
956 void set_allocation_sites_list(Object* object) { 922 void set_allocation_sites_list(Object* object) {
957 allocation_sites_list_ = object; 923 allocation_sites_list_ = object;
958 } 924 }
959 Object* allocation_sites_list() { return allocation_sites_list_; } 925 Object* allocation_sites_list() { return allocation_sites_list_; }
960 926
(...skipping 12 matching lines...) Expand all
973 } 939 }
974 Object* encountered_weak_cells() const { return encountered_weak_cells_; } 940 Object* encountered_weak_cells() const { return encountered_weak_cells_; }
975 941
976 // Number of mark-sweeps. 942 // Number of mark-sweeps.
977 int ms_count() const { return ms_count_; } 943 int ms_count() const { return ms_count_; }
978 944
979 // Checks whether the given object is allowed to be migrated from its 945 // Checks whether the given object is allowed to be migrated from its
980 // current space into the given destination space. Used for debugging. 946 // current space into the given destination space. Used for debugging.
981 inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest); 947 inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);
982 948
983 // Sets the stub_cache_ (only used when expanding the dictionary).
984 void public_set_code_stubs(UnseededNumberDictionary* value) {
985 roots_[kCodeStubsRootIndex] = value;
986 }
987
988 // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
989 void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) {
990 roots_[kNonMonomorphicCacheRootIndex] = value;
991 }
992
993 void public_set_empty_script(Script* script) {
994 roots_[kEmptyScriptRootIndex] = script;
995 }
996
997 void public_set_materialized_objects(FixedArray* objects) {
998 roots_[kMaterializedObjectsRootIndex] = objects;
999 }
1000
1001 // Generated code can embed this address to get access to the roots.
1002 Object** roots_array_start() { return roots_; }
1003
1004 void CheckHandleCount(); 949 void CheckHandleCount();
1005 950
1006 // Number of "runtime allocations" done so far. 951 // Number of "runtime allocations" done so far.
1007 uint32_t allocations_count() { return allocations_count_; } 952 uint32_t allocations_count() { return allocations_count_; }
1008 953
1009 // Returns deterministic "time" value in ms. Works only with 954 // Returns deterministic "time" value in ms. Works only with
1010 // FLAG_verify_predictable. 955 // FLAG_verify_predictable.
1011 double synthetic_time() { return allocations_count() / 2.0; } 956 double synthetic_time() { return allocations_count() / 2.0; }
1012 957
1013 // Print short heap statistics. 958 // Print short heap statistics.
(...skipping 23 matching lines...) Expand all
1037 // 982 //
1038 983
1039 void CreateApiObjects(); 984 void CreateApiObjects();
1040 985
1041 // Implements the corresponding V8 API function. 986 // Implements the corresponding V8 API function.
1042 bool IdleNotification(double deadline_in_seconds); 987 bool IdleNotification(double deadline_in_seconds);
1043 bool IdleNotification(int idle_time_in_ms); 988 bool IdleNotification(int idle_time_in_ms);
1044 989
1045 double MonotonicallyIncreasingTimeInMs(); 990 double MonotonicallyIncreasingTimeInMs();
1046 991
1047 Object* root(RootListIndex index) { return roots_[index]; }
1048
1049 // Generated code can treat direct references to this root as constant.
1050 bool RootCanBeTreatedAsConstant(RootListIndex root_index);
1051
1052 Map* MapForFixedTypedArray(ExternalArrayType array_type);
1053 RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);
1054
1055 RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
1056 FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);
1057
1058 void RecordStats(HeapStats* stats, bool take_snapshot = false); 992 void RecordStats(HeapStats* stats, bool take_snapshot = false);
1059 993
1060 // Check new space expansion criteria and expand semispaces if it was hit. 994 // Check new space expansion criteria and expand semispaces if it was hit.
1061 void CheckNewSpaceExpansionCriteria(); 995 void CheckNewSpaceExpansionCriteria();
1062 996
1063 inline bool HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit) { 997 inline bool HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit) {
1064 if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true; 998 if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
1065 999
1066 intptr_t adjusted_allocation_limit = limit - new_space_.Capacity(); 1000 intptr_t adjusted_allocation_limit = limit - new_space_.Capacity();
1067 1001
(...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after
1149 object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++; 1083 object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
1150 object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size; 1084 object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
1151 } 1085 }
1152 1086
1153 void TraceObjectStats(); 1087 void TraceObjectStats();
1154 void TraceObjectStat(const char* name, int count, int size, double time); 1088 void TraceObjectStat(const char* name, int count, int size, double time);
1155 void CheckpointObjectStats(); 1089 void CheckpointObjectStats();
1156 bool GetObjectTypeName(size_t index, const char** object_type, 1090 bool GetObjectTypeName(size_t index, const char** object_type,
1157 const char** object_sub_type); 1091 const char** object_sub_type);
1158 1092
1159 void RegisterStrongRoots(Object** start, Object** end);
1160 void UnregisterStrongRoots(Object** start);
1161
1162 void AddWeakObjectToCodeDependency(Handle<HeapObject> obj, 1093 void AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
1163 Handle<DependentCode> dep); 1094 Handle<DependentCode> dep);
1164 1095
1165 DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj); 1096 DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj);
1166 1097
1167 void AddRetainedMap(Handle<Map> map); 1098 void AddRetainedMap(Handle<Map> map);
1168 1099
1169 // This event is triggered after successful allocation of a new object made 1100 // This event is triggered after successful allocation of a new object made
1170 // by runtime. Allocations of target space for object evacuation do not 1101 // by runtime. Allocations of target space for object evacuation do not
1171 // trigger the event. In order to track ALL allocations one must turn off 1102 // trigger the event. In order to track ALL allocations one must turn off
(...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after
1287 1218
1288 MarkCompactCollector* mark_compact_collector() { 1219 MarkCompactCollector* mark_compact_collector() {
1289 return &mark_compact_collector_; 1220 return &mark_compact_collector_;
1290 } 1221 }
1291 1222
1292 ExternalStringTable* external_string_table() { 1223 ExternalStringTable* external_string_table() {
1293 return &external_string_table_; 1224 return &external_string_table_;
1294 } 1225 }
1295 1226
1296 // =========================================================================== 1227 // ===========================================================================
1228 // Root set access. ==========================================================
1229 // ===========================================================================
1230
1231 // Heap root getters. We have versions with and without type::cast() here.
1232 // You can't use type::cast during GC because the assert fails.
1233 // TODO(1490): Try removing the unchecked accessors, now that GC marking does
1234 // not corrupt the map.
1235 #define ROOT_ACCESSOR(type, name, camel_name) \
1236 inline type* name(); \
1237 type* raw_unchecked_##name() { \
1238 return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
1239 }
1240 ROOT_LIST(ROOT_ACCESSOR)
1241 #undef ROOT_ACCESSOR
1242
1243 // Utility type maps.
1244 #define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Map* name##_map();
1245 STRUCT_LIST(STRUCT_MAP_ACCESSOR)
1246 #undef STRUCT_MAP_ACCESSOR
1247
1248 #define STRING_ACCESSOR(name, str) inline String* name();
1249 INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
1250 #undef STRING_ACCESSOR
1251
1252 #define SYMBOL_ACCESSOR(name) inline Symbol* name();
1253 PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
1254 #undef SYMBOL_ACCESSOR
1255
1256 #define SYMBOL_ACCESSOR(name, varname, description) inline Symbol* name();
1257 PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
1258 #undef SYMBOL_ACCESSOR
1259
1260 Object* root(RootListIndex index) { return roots_[index]; }
1261
1262 // Generated code can embed this address to get access to the roots.
1263 Object** roots_array_start() { return roots_; }
1264
1265 // Sets the stub_cache_ (only used when expanding the dictionary).
1266 void public_set_code_stubs(UnseededNumberDictionary* value) {
1267 roots_[kCodeStubsRootIndex] = value;
1268 }
1269
1270 // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
1271 void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) {
1272 roots_[kNonMonomorphicCacheRootIndex] = value;
1273 }
1274
1275 void public_set_empty_script(Script* script) {
1276 roots_[kEmptyScriptRootIndex] = script;
1277 }
1278
1279 void public_set_materialized_objects(FixedArray* objects) {
1280 roots_[kMaterializedObjectsRootIndex] = objects;
1281 }
1282
1283 // Set the stack limit in the roots_ array. Some architectures generate
1284 // code that looks here, because it is faster than loading from the static
1285 // jslimit_/real_jslimit_ variable in the StackGuard.
1286 void SetStackLimits();
1287
1288 // Generated code can treat direct references to this root as constant.
1289 bool RootCanBeTreatedAsConstant(RootListIndex root_index);
1290
1291 Map* MapForFixedTypedArray(ExternalArrayType array_type);
1292 RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);
1293
1294 RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
1295 FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);
1296
1297 void RegisterStrongRoots(Object** start, Object** end);
1298 void UnregisterStrongRoots(Object** start);
1299
1300 // ===========================================================================
1297 // Inline allocation. ======================================================== 1301 // Inline allocation. ========================================================
1298 // =========================================================================== 1302 // ===========================================================================
1299 1303
1300 // Indicates whether inline bump-pointer allocation has been disabled. 1304 // Indicates whether inline bump-pointer allocation has been disabled.
1301 bool inline_allocation_disabled() { return inline_allocation_disabled_; } 1305 bool inline_allocation_disabled() { return inline_allocation_disabled_; }
1302 1306
1303 // Switch whether inline bump-pointer allocation should be used. 1307 // Switch whether inline bump-pointer allocation should be used.
1304 void EnableInlineAllocation(); 1308 void EnableInlineAllocation();
1305 void DisableInlineAllocation(); 1309 void DisableInlineAllocation();
1306 1310
(...skipping 1472 matching lines...) Expand 10 before | Expand all | Expand 10 after
2779 DisallowHeapAllocation no_allocation; // i.e. no gc allowed. 2783 DisallowHeapAllocation no_allocation; // i.e. no gc allowed.
2780 2784
2781 private: 2785 private:
2782 DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer); 2786 DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
2783 }; 2787 };
2784 #endif // DEBUG 2788 #endif // DEBUG
2785 } 2789 }
2786 } // namespace v8::internal 2790 } // namespace v8::internal
2787 2791
2788 #endif // V8_HEAP_HEAP_H_ 2792 #endif // V8_HEAP_HEAP_H_
OLDNEW
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698