Chromium Code Reviews

Side by Side Diff: src/heap.h

Issue 7945009: Merge experimental/gc branch to the bleeding_edge. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 9 years, 3 months ago
OLD | NEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 14 matching lines...)
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #ifndef V8_HEAP_H_ 28 #ifndef V8_HEAP_H_
29 #define V8_HEAP_H_ 29 #define V8_HEAP_H_
30 30
31 #include <math.h> 31 #include <math.h>
32 32
33 #include "allocation.h" 33 #include "allocation.h"
34 #include "globals.h" 34 #include "globals.h"
35 #include "incremental-marking.h"
35 #include "list.h" 36 #include "list.h"
36 #include "mark-compact.h" 37 #include "mark-compact.h"
38 #include "objects-visiting.h"
37 #include "spaces.h" 39 #include "spaces.h"
38 #include "splay-tree-inl.h" 40 #include "splay-tree-inl.h"
41 #include "store-buffer.h"
39 #include "v8-counters.h" 42 #include "v8-counters.h"
43 #include "v8globals.h"
40 44
41 namespace v8 { 45 namespace v8 {
42 namespace internal { 46 namespace internal {
43 47
44 // TODO(isolates): remove HEAP here 48 // TODO(isolates): remove HEAP here
45 #define HEAP (_inline_get_heap_()) 49 #define HEAP (_inline_get_heap_())
46 class Heap; 50 class Heap;
47 inline Heap* _inline_get_heap_(); 51 inline Heap* _inline_get_heap_();
48 52
49 53
50 // Defines all the roots in Heap. 54 // Defines all the roots in Heap.
51 #define STRONG_ROOT_LIST(V) \ 55 #define STRONG_ROOT_LIST(V) \
52 /* Put the byte array map early. We need it to be in place by the time */ \
53 /* the deserializer hits the next page, since it wants to put a byte */ \
54 /* array in the unused space at the end of the page. */ \
55 V(Map, byte_array_map, ByteArrayMap) \ 56 V(Map, byte_array_map, ByteArrayMap) \
57 V(Map, free_space_map, FreeSpaceMap) \
56 V(Map, one_pointer_filler_map, OnePointerFillerMap) \ 58 V(Map, one_pointer_filler_map, OnePointerFillerMap) \
57 V(Map, two_pointer_filler_map, TwoPointerFillerMap) \ 59 V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
58 /* Cluster the most popular ones in a few cache lines here at the top. */ \ 60 /* Cluster the most popular ones in a few cache lines here at the top. */ \
61 V(Smi, store_buffer_top, StoreBufferTop) \
59 V(Object, undefined_value, UndefinedValue) \ 62 V(Object, undefined_value, UndefinedValue) \
60 V(Object, the_hole_value, TheHoleValue) \ 63 V(Object, the_hole_value, TheHoleValue) \
61 V(Object, null_value, NullValue) \ 64 V(Object, null_value, NullValue) \
62 V(Object, true_value, TrueValue) \ 65 V(Object, true_value, TrueValue) \
63 V(Object, false_value, FalseValue) \ 66 V(Object, false_value, FalseValue) \
64 V(Object, arguments_marker, ArgumentsMarker) \ 67 V(Object, arguments_marker, ArgumentsMarker) \
65 V(Map, heap_number_map, HeapNumberMap) \ 68 V(Map, heap_number_map, HeapNumberMap) \
66 V(Map, global_context_map, GlobalContextMap) \ 69 V(Map, global_context_map, GlobalContextMap) \
67 V(Map, fixed_array_map, FixedArrayMap) \ 70 V(Map, fixed_array_map, FixedArrayMap) \
68 V(Map, serialized_scope_info_map, SerializedScopeInfoMap) \ 71 V(Map, serialized_scope_info_map, SerializedScopeInfoMap) \
(...skipping 162 matching lines...)
231 // Forward declarations. 234 // Forward declarations.
232 class GCTracer; 235 class GCTracer;
233 class HeapStats; 236 class HeapStats;
234 class Isolate; 237 class Isolate;
235 class WeakObjectRetainer; 238 class WeakObjectRetainer;
236 239
237 240
238 typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap, 241 typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
239 Object** pointer); 242 Object** pointer);
240 243
241 typedef bool (*DirtyRegionCallback)(Heap* heap, 244 class StoreBufferRebuilder {
242 Address start, 245 public:
243 Address end, 246 explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
244 ObjectSlotCallback copy_object_func); 247 : store_buffer_(store_buffer) {
248 }
249
250 void Callback(MemoryChunk* page, StoreBufferEvent event);
251
252 private:
253 StoreBuffer* store_buffer_;
254
255 // We record in this variable how full the store buffer was when we started
256 // iterating over the current page, finding pointers to new space. If the
257 // store buffer overflows again we can exempt the page from the store buffer
258 // by rewinding to this point instead of having to search the store buffer.
259 Object*** start_of_current_page_;
260 // The current page we are scanning in the store buffer iterator.
261 MemoryChunk* current_page_;
262 };
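
The start_of_current_page_ comment above describes an overflow-rewind trick: remember how full the store buffer was when scanning of a page began, and if the buffer overflows, drop that page's partial entries and mark the page for wholesale re-scanning instead. A minimal self-contained sketch of that idea (all types and names here are hypothetical stand-ins, not the heap.cc implementation):

    #include <cstddef>
    #include <vector>

    enum SketchStoreBufferEvent {               // hypothetical event kinds
      kSketchStartScanningPagesEvent,
      kSketchBufferFullEvent
    };

    struct PageStub { bool scan_on_scavenge = false; };  // stand-in for MemoryChunk

    struct RebuilderSketch {
      std::vector<void*>* buffer;               // stands in for the store buffer
      size_t start_of_current_page = 0;         // fill level when page scan began
      PageStub* current_page = nullptr;

      void Callback(PageStub* page, SketchStoreBufferEvent event) {
        if (event == kSketchStartScanningPagesEvent) {
          start_of_current_page = buffer->size();
          current_page = page;
        } else if (event == kSketchBufferFullEvent && current_page != nullptr) {
          current_page->scan_on_scavenge = true;  // exempt page from the buffer
          buffer->resize(start_of_current_page);  // rewind its partial entries
        }
      }
    };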
263
245 264
246 265
247 // The all static Heap captures the interface to the global object heap. 266 // The all static Heap captures the interface to the global object heap.
248 // All JavaScript contexts in this process share the same object heap. 267 // All JavaScript contexts in this process share the same object heap.
249 268
250 #ifdef DEBUG 269 #ifdef DEBUG
251 class HeapDebugUtils; 270 class HeapDebugUtils;
252 #endif 271 #endif
253 272
254 273
255 // A queue of objects promoted during scavenge. Each object is accompanied 274 // A queue of objects promoted during scavenge. Each object is accompanied
256 // by its size to avoid dereferencing a map pointer for scanning. 275 // by its size to avoid dereferencing a map pointer for scanning.
257 class PromotionQueue { 276 class PromotionQueue {
258 public: 277 public:
259 PromotionQueue() : front_(NULL), rear_(NULL) { } 278 PromotionQueue() : front_(NULL), rear_(NULL) { }
260 279
261 void Initialize(Address start_address) { 280 void Initialize(Address start_address) {
281 // Assumes that a NewSpacePage exactly fits a number of promotion queue
282 // entries (where each is a pair of intptr_t). This allows us to simplify
283 // the test for when to switch pages.
284 ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
285 == 0);
286 ASSERT(NewSpacePage::IsAtEnd(start_address));
262 front_ = rear_ = reinterpret_cast<intptr_t*>(start_address); 287 front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
263 } 288 }
264 289
265 bool is_empty() { return front_ <= rear_; } 290 bool is_empty() { return front_ == rear_; }
266 291
267 inline void insert(HeapObject* target, int size); 292 inline void insert(HeapObject* target, int size);
268 293
269 void remove(HeapObject** target, int* size) { 294 void remove(HeapObject** target, int* size) {
295 ASSERT(!is_empty());
296 if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
297 NewSpacePage* front_page =
298 NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
299 ASSERT(!front_page->prev_page()->is_anchor());
300 front_ =
301 reinterpret_cast<intptr_t*>(front_page->prev_page()->body_limit());
302 }
270 *target = reinterpret_cast<HeapObject*>(*(--front_)); 303 *target = reinterpret_cast<HeapObject*>(*(--front_));
271 *size = static_cast<int>(*(--front_)); 304 *size = static_cast<int>(*(--front_));
272 // Assert no underflow. 305 // Assert no underflow.
273 ASSERT(front_ >= rear_); 306 SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
307 reinterpret_cast<Address>(front_));
274 } 308 }
275 309
276 private: 310 private:
277 // The front of the queue is higher in memory than the rear. 311 // The front of the queue is higher in the memory page chain than the rear.
278 intptr_t* front_; 312 intptr_t* front_;
279 intptr_t* rear_; 313 intptr_t* rear_;
280 314
281 DISALLOW_COPY_AND_ASSIGN(PromotionQueue); 315 DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
282 }; 316 };
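
A minimal sketch of the queue discipline above, with the new-space page chaining stripped out (illustrative only; V8's remove() additionally hops to the previous page when front_ hits a page start):

    #include <cassert>
    #include <cstdint>

    // (object, size) pairs grow downwards from the high end of one buffer:
    // rear_ inserts at the low end, front_ consumes from the high end, so
    // entries come back out in FIFO order.
    struct PromotionQueueSketch {
      intptr_t* front_;
      intptr_t* rear_;

      void Initialize(intptr_t* end_of_buffer) { front_ = rear_ = end_of_buffer; }
      bool is_empty() const { return front_ == rear_; }

      void insert(void* target, int size) {
        *(--rear_) = reinterpret_cast<intptr_t>(target);
        *(--rear_) = static_cast<intptr_t>(size);
      }

      void remove(void** target, int* size) {
        assert(!is_empty());
        *target = reinterpret_cast<void*>(*(--front_));
        *size = static_cast<int>(*(--front_));
      }
    };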
283 317
284 318
319 typedef void (*ScavengingCallback)(Map* map,
320 HeapObject** slot,
321 HeapObject* object);
322
323
285 // External strings table is a place where all external strings are 324 // External strings table is a place where all external strings are
286 // registered. We need to keep track of such strings to properly 325 // registered. We need to keep track of such strings to properly
287 // finalize them. 326 // finalize them.
288 class ExternalStringTable { 327 class ExternalStringTable {
289 public: 328 public:
290 // Registers an external string. 329 // Registers an external string.
291 inline void AddString(String* string); 330 inline void AddString(String* string);
292 331
293 inline void Iterate(ObjectVisitor* v); 332 inline void Iterate(ObjectVisitor* v);
294 333
(...skipping 25 matching lines...)
320 359
321 DISALLOW_COPY_AND_ASSIGN(ExternalStringTable); 360 DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
322 }; 361 };
323 362
324 363
325 class Heap { 364 class Heap {
326 public: 365 public:
327 // Configure heap size before setup. Return false if the heap has been 366 // Configure heap size before setup. Return false if the heap has been
328 // set up already. 367 // set up already.
329 bool ConfigureHeap(int max_semispace_size, 368 bool ConfigureHeap(int max_semispace_size,
330 int max_old_gen_size, 369 intptr_t max_old_gen_size,
331 int max_executable_size); 370 intptr_t max_executable_size);
332 bool ConfigureHeapDefault(); 371 bool ConfigureHeapDefault();
333 372
334 // Initializes the global object heap. If create_heap_objects is true, 373 // Initializes the global object heap. If create_heap_objects is true,
335 // also creates the basic non-mutable objects. 374 // also creates the basic non-mutable objects.
336 // Returns whether it succeeded. 375 // Returns whether it succeeded.
337 bool Setup(bool create_heap_objects); 376 bool Setup(bool create_heap_objects);
338 377
339 // Destroys all memory allocated by the heap. 378 // Destroys all memory allocated by the heap.
340 void TearDown(); 379 void TearDown();
341 380
(...skipping 536 matching lines...)
878 // Performs garbage collection operation. 917 // Performs garbage collection operation.
879 // Returns whether there is a chance that another major GC could 918 // Returns whether there is a chance that another major GC could
880 // collect more garbage. 919 // collect more garbage.
881 bool CollectGarbage(AllocationSpace space, GarbageCollector collector); 920 bool CollectGarbage(AllocationSpace space, GarbageCollector collector);
882 921
883 // Performs garbage collection operation. 922 // Performs garbage collection operation.
884 // Returns whether there is a chance that another major GC could 923 // Returns whether there is a chance that another major GC could
885 // collect more garbage. 924 // collect more garbage.
886 inline bool CollectGarbage(AllocationSpace space); 925 inline bool CollectGarbage(AllocationSpace space);
887 926
888 // Performs a full garbage collection. Force compaction if the 927 static const int kNoGCFlags = 0;
889 // parameter is true. 928 static const int kMakeHeapIterableMask = 1;
890 void CollectAllGarbage(bool force_compaction); 929
930 // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
931 // non-zero, then the slower precise sweeper is used, which leaves the heap
932 // in a state where we can iterate over the heap visiting all objects.
933 void CollectAllGarbage(int flags);
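
Call sites then select the sweeping mode through the flags (illustrative usage, assuming a Heap* heap):

    heap->CollectAllGarbage(Heap::kNoGCFlags);             // ordinary full GC
    heap->CollectAllGarbage(Heap::kMakeHeapIterableMask);  // precise sweep; the
                                                           // heap is iterable after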
891 934
892 // Last hope GC, should try to squeeze as much as possible. 935 // Last hope GC, should try to squeeze as much as possible.
893 void CollectAllAvailableGarbage(); 936 void CollectAllAvailableGarbage();
894 937
938 // Check whether the heap is currently iterable.
939 bool IsHeapIterable();
940
941 // Ensure that we have swept all spaces in such a way that we can iterate
942 // over all objects. May cause a GC.
943 void EnsureHeapIsIterable();
944
895 // Notify the heap that a context has been disposed. 945 // Notify the heap that a context has been disposed.
896 int NotifyContextDisposed() { return ++contexts_disposed_; } 946 int NotifyContextDisposed() { return ++contexts_disposed_; }
897 947
898 // Utility to invoke the scavenger. This is needed in test code to 948 // Utility to invoke the scavenger. This is needed in test code to
899 // ensure correct callback for weak global handles. 949 // ensure correct callback for weak global handles.
900 void PerformScavenge(); 950 void PerformScavenge();
901 951
952 inline void increment_scan_on_scavenge_pages() {
953 scan_on_scavenge_pages_++;
954 if (FLAG_gc_verbose) {
955 PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
956 }
957 }
958
959 inline void decrement_scan_on_scavenge_pages() {
960 scan_on_scavenge_pages_--;
961 if (FLAG_gc_verbose) {
962 PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
963 }
964 }
965
902 PromotionQueue* promotion_queue() { return &promotion_queue_; } 966 PromotionQueue* promotion_queue() { return &promotion_queue_; }
903 967
904 #ifdef DEBUG 968 #ifdef DEBUG
905 // Utility used with flag gc-greedy. 969 // Utility used with flag gc-greedy.
906 void GarbageCollectionGreedyCheck(); 970 void GarbageCollectionGreedyCheck();
907 #endif 971 #endif
908 972
909 void AddGCPrologueCallback( 973 void AddGCPrologueCallback(
910 GCEpilogueCallback callback, GCType gc_type_filter); 974 GCEpilogueCallback callback, GCType gc_type_filter);
911 void RemoveGCPrologueCallback(GCEpilogueCallback callback); 975 void RemoveGCPrologueCallback(GCEpilogueCallback callback);
912 976
913 void AddGCEpilogueCallback( 977 void AddGCEpilogueCallback(
914 GCEpilogueCallback callback, GCType gc_type_filter); 978 GCEpilogueCallback callback, GCType gc_type_filter);
915 void RemoveGCEpilogueCallback(GCEpilogueCallback callback); 979 void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
916 980
917 void SetGlobalGCPrologueCallback(GCCallback callback) { 981 void SetGlobalGCPrologueCallback(GCCallback callback) {
918 ASSERT((callback == NULL) ^ (global_gc_prologue_callback_ == NULL)); 982 ASSERT((callback == NULL) ^ (global_gc_prologue_callback_ == NULL));
919 global_gc_prologue_callback_ = callback; 983 global_gc_prologue_callback_ = callback;
920 } 984 }
921 void SetGlobalGCEpilogueCallback(GCCallback callback) { 985 void SetGlobalGCEpilogueCallback(GCCallback callback) {
922 ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL)); 986 ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL));
923 global_gc_epilogue_callback_ = callback; 987 global_gc_epilogue_callback_ = callback;
924 } 988 }
925 989
926 // Heap root getters. We have versions with and without type::cast() here. 990 // Heap root getters. We have versions with and without type::cast() here.
927 // You can't use type::cast during GC because the assert fails. 991 // You can't use type::cast during GC because the assert fails.
992 // TODO(1490): Try removing the unchecked accessors, now that GC marking does
993 // not corrupt the stack.
928 #define ROOT_ACCESSOR(type, name, camel_name) \ 994 #define ROOT_ACCESSOR(type, name, camel_name) \
929 type* name() { \ 995 type* name() { \
930 return type::cast(roots_[k##camel_name##RootIndex]); \ 996 return type::cast(roots_[k##camel_name##RootIndex]); \
931 } \ 997 } \
932 type* raw_unchecked_##name() { \ 998 type* raw_unchecked_##name() { \
933 return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \ 999 return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
934 } 1000 }
935 ROOT_LIST(ROOT_ACCESSOR) 1001 ROOT_LIST(ROOT_ACCESSOR)
936 #undef ROOT_ACCESSOR 1002 #undef ROOT_ACCESSOR
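
For instance, instantiated for the undefined_value entry of STRONG_ROOT_LIST above, ROOT_ACCESSOR expands (modulo whitespace) to:

    Object* undefined_value() {
      return Object::cast(roots_[kUndefinedValueRootIndex]);
    }
    Object* raw_unchecked_undefined_value() {
      return reinterpret_cast<Object*>(roots_[kUndefinedValueRootIndex]);
    }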
937 1003
(...skipping 20 matching lines...)
958 } 1024 }
959 Object* global_contexts_list() { return global_contexts_list_; } 1025 Object* global_contexts_list() { return global_contexts_list_; }
960 1026
961 // Iterates over all roots in the heap. 1027 // Iterates over all roots in the heap.
962 void IterateRoots(ObjectVisitor* v, VisitMode mode); 1028 void IterateRoots(ObjectVisitor* v, VisitMode mode);
963 // Iterates over all strong roots in the heap. 1029 // Iterates over all strong roots in the heap.
964 void IterateStrongRoots(ObjectVisitor* v, VisitMode mode); 1030 void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
965 // Iterates over all the other roots in the heap. 1031 // Iterates over all the other roots in the heap.
966 void IterateWeakRoots(ObjectVisitor* v, VisitMode mode); 1032 void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
967 1033
968 enum ExpectedPageWatermarkState {
969 WATERMARK_SHOULD_BE_VALID,
970 WATERMARK_CAN_BE_INVALID
971 };
972
973 // For each dirty region on a page in use from an old space call
974 // visit_dirty_region callback.
975 // If either visit_dirty_region or callback can cause an allocation
976 // in old space and changes in allocation watermark then
977 // can_preallocate_during_iteration should be set to true.
978 // All pages will be marked as having invalid watermark upon
979 // iteration completion.
980 void IterateDirtyRegions(
981 PagedSpace* space,
982 DirtyRegionCallback visit_dirty_region,
983 ObjectSlotCallback callback,
984 ExpectedPageWatermarkState expected_page_watermark_state);
985
986 // Interpret marks as a bitvector of dirty marks for regions of size
987 // Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
988 // memory interval from start to top. For each dirty region call a
989 // visit_dirty_region callback. Return updated bitvector of dirty marks.
990 uint32_t IterateDirtyRegions(uint32_t marks,
991 Address start,
992 Address end,
993 DirtyRegionCallback visit_dirty_region,
994 ObjectSlotCallback callback);
995
996 // Iterate pointers to the from-semispace of new space found in the memory interval 1034 // Iterate pointers to the from-semispace of new space found in the memory interval
997 // from start to end. 1035 // from start to end.
998 // Update dirty marks for page containing start address.
999 void IterateAndMarkPointersToFromSpace(Address start, 1036 void IterateAndMarkPointersToFromSpace(Address start,
1000 Address end, 1037 Address end,
1001 ObjectSlotCallback callback); 1038 ObjectSlotCallback callback);
1002 1039
1003 // Iterate pointers to new space found in memory interval from start to end. 1040 // Iterate pointers to new space found in memory interval from start to end.
1004 // Return true if pointers to new space were found. 1041 static void IteratePointersToNewSpace(Heap* heap,
1005 static bool IteratePointersInDirtyRegion(Heap* heap, 1042 Address start,
1006 Address start, 1043 Address end,
1007 Address end, 1044 ObjectSlotCallback callback);
1008 ObjectSlotCallback callback);
1009 1045
1010 1046
1011 // Iterate pointers to new space found in memory interval from start to end. 1047 // Iterate pointers to new space found in memory interval from start to end.
1012 // This interval is considered to belong to the map space. 1048 // This interval is considered to belong to the map space.
1013 // Return true if pointers to new space were found. 1049 static void IteratePointersFromMapsToNewSpace(Heap* heap,
1014 static bool IteratePointersInDirtyMapsRegion(Heap* heap, 1050 Address start,
1015 Address start, 1051 Address end,
1016 Address end, 1052 ObjectSlotCallback callback);
1017 ObjectSlotCallback callback);
1018 1053
1019 1054
1020 // Returns whether the object resides in new space. 1055 // Returns whether the object resides in new space.
1021 inline bool InNewSpace(Object* object); 1056 inline bool InNewSpace(Object* object);
1057 inline bool InNewSpace(Address addr);
1058 inline bool InNewSpacePage(Address addr);
1022 inline bool InFromSpace(Object* object); 1059 inline bool InFromSpace(Object* object);
1023 inline bool InToSpace(Object* object); 1060 inline bool InToSpace(Object* object);
1024 1061
1025 // Checks whether an address/object is in the heap (including auxiliary 1062 // Checks whether an address/object is in the heap (including auxiliary
1026 // area and unused area). 1063 // area and unused area).
1027 bool Contains(Address addr); 1064 bool Contains(Address addr);
1028 bool Contains(HeapObject* value); 1065 bool Contains(HeapObject* value);
1029 1066
1030 // Checks whether an address/object is in a space. 1067 // Checks whether an address/object is in a space.
1031 // Currently used by tests, serialization and heap verification only. 1068 // Currently used by tests, serialization and heap verification only.
(...skipping 18 matching lines...)
1050 1087
1051 // Sets the non_monomorphic_cache_ (only used when expanding the dictionary). 1088 // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
1052 void public_set_non_monomorphic_cache(NumberDictionary* value) { 1089 void public_set_non_monomorphic_cache(NumberDictionary* value) {
1053 roots_[kNonMonomorphicCacheRootIndex] = value; 1090 roots_[kNonMonomorphicCacheRootIndex] = value;
1054 } 1091 }
1055 1092
1056 void public_set_empty_script(Script* script) { 1093 void public_set_empty_script(Script* script) {
1057 roots_[kEmptyScriptRootIndex] = script; 1094 roots_[kEmptyScriptRootIndex] = script;
1058 } 1095 }
1059 1096
1097 void public_set_store_buffer_top(Address* top) {
1098 roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
1099 }
1100
1060 // Update the next script id. 1101 // Update the next script id.
1061 inline void SetLastScriptId(Object* last_script_id); 1102 inline void SetLastScriptId(Object* last_script_id);
1062 1103
1063 // Generated code can embed this address to get access to the roots. 1104 // Generated code can embed this address to get access to the roots.
1064 Object** roots_address() { return roots_; } 1105 Object** roots_address() { return roots_; }
1065 1106
1107 Address* store_buffer_top_address() {
1108 return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
1109 }
1110
1066 // Get address of global contexts list for serialization support. 1111 // Get address of global contexts list for serialization support.
1067 Object** global_contexts_list_address() { 1112 Object** global_contexts_list_address() {
1068 return &global_contexts_list_; 1113 return &global_contexts_list_;
1069 } 1114 }
1070 1115
1071 #ifdef DEBUG 1116 #ifdef DEBUG
1072 void Print(); 1117 void Print();
1073 void PrintHandles(); 1118 void PrintHandles();
1074 1119
1075 // Verify the heap is in its normal state before or after a GC. 1120 // Verify the heap is in its normal state before or after a GC.
1076 void Verify(); 1121 void Verify();
1077 1122
1123 void OldPointerSpaceCheckStoreBuffer();
1124 void MapSpaceCheckStoreBuffer();
1125 void LargeObjectSpaceCheckStoreBuffer();
1126
1078 // Report heap statistics. 1127 // Report heap statistics.
1079 void ReportHeapStatistics(const char* title); 1128 void ReportHeapStatistics(const char* title);
1080 void ReportCodeStatistics(const char* title); 1129 void ReportCodeStatistics(const char* title);
1081 1130
1082 // Fill in bogus values in from space 1131 // Fill in bogus values in from space
1083 void ZapFromSpace(); 1132 void ZapFromSpace();
1084 #endif 1133 #endif
1085 1134
1086 // Print short heap statistics. 1135 // Print short heap statistics.
1087 void PrintShortHeapStatistics(); 1136 void PrintShortHeapStatistics();
(...skipping 75 matching lines...)
1163 1212
1164 // Adjusts the amount of registered external memory. 1213 // Adjusts the amount of registered external memory.
1165 // Returns the adjusted value. 1214 // Returns the adjusted value.
1166 inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes); 1215 inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
1167 1216
1168 // Allocate uninitialized fixed array. 1217 // Allocate uninitialized fixed array.
1169 MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length); 1218 MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length);
1170 MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length, 1219 MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
1171 PretenureFlag pretenure); 1220 PretenureFlag pretenure);
1172 1221
1173 // True if we have reached the allocation limit in the old generation that 1222 inline intptr_t PromotedTotalSize() {
1174 // should force the next GC (caused normally) to be a full one. 1223 return PromotedSpaceSize() + PromotedExternalMemorySize();
1175 bool OldGenerationPromotionLimitReached() {
1176 return (PromotedSpaceSize() + PromotedExternalMemorySize())
1177 > old_gen_promotion_limit_;
1178 }
1179
1180 intptr_t OldGenerationSpaceAvailable() {
1181 return old_gen_allocation_limit_ -
1182 (PromotedSpaceSize() + PromotedExternalMemorySize());
1183 } 1224 }
1184 1225
1185 // True if we have reached the allocation limit in the old generation that 1226 // True if we have reached the allocation limit in the old generation that
1186 // should artificially cause a GC right now. 1227 // should force the next GC (caused normally) to be a full one.
1187 bool OldGenerationAllocationLimitReached() { 1228 inline bool OldGenerationPromotionLimitReached() {
1188 return OldGenerationSpaceAvailable() < 0; 1229 return PromotedTotalSize() > old_gen_promotion_limit_;
1230 }
1231
1232 inline intptr_t OldGenerationSpaceAvailable() {
1233 return old_gen_allocation_limit_ - PromotedTotalSize();
1234 }
1235
1236 static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize;
1237 static const intptr_t kMinimumAllocationLimit =
1238 8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
1239
1240 // When we sweep lazily we initially guess that there is no garbage on the
1241 // heap and set the limits for the next GC accordingly. As we sweep we find
1242 // out that some of the pages contained garbage and we have to adjust
1243 // downwards the size of the heap. This means the limits that control the
1244 // timing of the next GC also need to be adjusted downwards.
1245 void LowerOldGenLimits(intptr_t adjustment) {
1246 size_of_old_gen_at_last_old_space_gc_ -= adjustment;
1247 old_gen_promotion_limit_ =
1248 OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
1249 old_gen_allocation_limit_ =
1250 OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
1251 }
1252
1253 intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
1254 intptr_t limit =
1255 Max(old_gen_size + old_gen_size / 3, kMinimumPromotionLimit);
1256 limit += new_space_.Capacity();
1257 limit *= old_gen_limit_factor_;
1258 return limit;
1259 }
1260
1261 intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
1262 intptr_t limit =
1263 Max(old_gen_size + old_gen_size / 2, kMinimumAllocationLimit);
1264 limit += new_space_.Capacity();
1265 limit *= old_gen_limit_factor_;
1266 return limit;
1189 } 1267 }
1190 1268
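For concreteness, a worked instance of the two limit formulas above (illustrative numbers only; assumes old_gen_limit_factor_ == 1, an 8 MB new-space capacity, and that neither kMinimum* constant binds):

    // old_gen_size = 96 MB, new_space_.Capacity() = 8 MB, factor = 1:
    //   OldGenPromotionLimit(96 MB)  = (96 + 96/3 + 8) MB * 1 = 136 MB
    //   OldGenAllocationLimit(96 MB) = (96 + 96/2 + 8) MB * 1 = 152 MB
    // So the next normally-caused GC is upgraded to a full one once promoted
    // data exceeds ~136 MB, and old-space expansion forces a GC at ~152 MB.
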
1191 // Can be called when the embedding application is idle. 1269 // Can be called when the embedding application is idle.
1192 bool IdleNotification(); 1270 bool IdleNotification();
1193 1271
1194 // Declare all the root indices. 1272 // Declare all the root indices.
1195 enum RootListIndex { 1273 enum RootListIndex {
1196 #define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, 1274 #define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
1197 STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION) 1275 STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
1198 #undef ROOT_INDEX_DECLARATION 1276 #undef ROOT_INDEX_DECLARATION
(...skipping 18 matching lines...)
1217 Map* MapForExternalArrayType(ExternalArrayType array_type); 1295 Map* MapForExternalArrayType(ExternalArrayType array_type);
1218 RootListIndex RootIndexForExternalArrayType( 1296 RootListIndex RootIndexForExternalArrayType(
1219 ExternalArrayType array_type); 1297 ExternalArrayType array_type);
1220 1298
1221 void RecordStats(HeapStats* stats, bool take_snapshot = false); 1299 void RecordStats(HeapStats* stats, bool take_snapshot = false);
1222 1300
1223 // Copy block of memory from src to dst. Size of block should be aligned 1301 // Copy block of memory from src to dst. Size of block should be aligned
1224 // by pointer size. 1302 // by pointer size.
1225 static inline void CopyBlock(Address dst, Address src, int byte_size); 1303 static inline void CopyBlock(Address dst, Address src, int byte_size);
1226 1304
1227 inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
1228 Address src,
1229 int byte_size);
1230
1231 // Optimized version of memmove for blocks with pointer size aligned sizes and 1305 // Optimized version of memmove for blocks with pointer size aligned sizes and
1232 // pointer size aligned addresses. 1306 // pointer size aligned addresses.
1233 static inline void MoveBlock(Address dst, Address src, int byte_size); 1307 static inline void MoveBlock(Address dst, Address src, int byte_size);
1234 1308
1235 inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
1236 Address src,
1237 int byte_size);
1238
1239 // Check new space expansion criteria and expand semispaces if it was hit. 1309 // Check new space expansion criteria and expand semispaces if it was hit.
1240 void CheckNewSpaceExpansionCriteria(); 1310 void CheckNewSpaceExpansionCriteria();
1241 1311
1242 inline void IncrementYoungSurvivorsCounter(int survived) { 1312 inline void IncrementYoungSurvivorsCounter(int survived) {
1243 young_survivors_after_last_gc_ = survived; 1313 young_survivors_after_last_gc_ = survived;
1244 survived_since_last_expansion_ += survived; 1314 survived_since_last_expansion_ += survived;
1245 } 1315 }
1246 1316
1317 inline bool NextGCIsLikelyToBeFull() {
1318 if (FLAG_gc_global) return true;
1319
1320 intptr_t total_promoted = PromotedTotalSize();
1321
1322 intptr_t adjusted_promotion_limit =
1323 old_gen_promotion_limit_ - new_space_.Capacity();
1324
1325 if (total_promoted >= adjusted_promotion_limit) return true;
1326
1327 intptr_t adjusted_allocation_limit =
1328 old_gen_allocation_limit_ - new_space_.Capacity() / 5;
1329
1330 if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
1331
1332 return false;
1333 }
1334
1335
1247 void UpdateNewSpaceReferencesInExternalStringTable( 1336 void UpdateNewSpaceReferencesInExternalStringTable(
1248 ExternalStringTableUpdaterCallback updater_func); 1337 ExternalStringTableUpdaterCallback updater_func);
1249 1338
1339 void UpdateReferencesInExternalStringTable(
1340 ExternalStringTableUpdaterCallback updater_func);
1341
1250 void ProcessWeakReferences(WeakObjectRetainer* retainer); 1342 void ProcessWeakReferences(WeakObjectRetainer* retainer);
1251 1343
1252 // Helper function that governs the promotion policy from new space to 1344 // Helper function that governs the promotion policy from new space to
1253 // old. If the object's old address lies below the new space's age 1345 // old. If the object's old address lies below the new space's age
1254 // mark or if we've already filled the bottom 1/16th of the to space, 1346 // mark or if we've already filled the bottom 1/16th of the to space,
1255 // we try to promote this object. 1347 // we try to promote this object.
1256 inline bool ShouldBePromoted(Address old_address, int object_size); 1348 inline bool ShouldBePromoted(Address old_address, int object_size);
1257 1349
1258 int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; } 1350 int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; }
1259 1351
1260 void ClearJSFunctionResultCaches(); 1352 void ClearJSFunctionResultCaches();
1261 1353
1262 void ClearNormalizedMapCaches(); 1354 void ClearNormalizedMapCaches();
1263 1355
1264 GCTracer* tracer() { return tracer_; } 1356 GCTracer* tracer() { return tracer_; }
1265 1357
1358 // Returns the size of objects residing in non new spaces.
1359 intptr_t PromotedSpaceSize();
1360
1266 double total_regexp_code_generated() { return total_regexp_code_generated_; } 1361 double total_regexp_code_generated() { return total_regexp_code_generated_; }
1267 void IncreaseTotalRegexpCodeGenerated(int size) { 1362 void IncreaseTotalRegexpCodeGenerated(int size) {
1268 total_regexp_code_generated_ += size; 1363 total_regexp_code_generated_ += size;
1269 } 1364 }
1270 1365
1271 // Returns maximum GC pause. 1366 // Returns maximum GC pause.
1272 int get_max_gc_pause() { return max_gc_pause_; } 1367 int get_max_gc_pause() { return max_gc_pause_; }
1273 1368
1274 // Returns maximum size of objects alive after GC. 1369 // Returns maximum size of objects alive after GC.
1275 intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; } 1370 intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
1276 1371
1277 // Returns minimal interval between two subsequent collections. 1372 // Returns minimal interval between two subsequent collections.
1278 int get_min_in_mutator() { return min_in_mutator_; } 1373 int get_min_in_mutator() { return min_in_mutator_; }
1279 1374
1280 MarkCompactCollector* mark_compact_collector() { 1375 MarkCompactCollector* mark_compact_collector() {
1281 return &mark_compact_collector_; 1376 return &mark_compact_collector_;
1282 } 1377 }
1283 1378
1379 StoreBuffer* store_buffer() {
1380 return &store_buffer_;
1381 }
1382
1383 Marking* marking() {
1384 return &marking_;
1385 }
1386
1387 IncrementalMarking* incremental_marking() {
1388 return &incremental_marking_;
1389 }
1390
1284 ExternalStringTable* external_string_table() { 1391 ExternalStringTable* external_string_table() {
1285 return &external_string_table_; 1392 return &external_string_table_;
1286 } 1393 }
1287 1394
1288 // Returns the current sweep generation. 1395 // Returns the current sweep generation.
1289 int sweep_generation() { 1396 int sweep_generation() {
1290 return sweep_generation_; 1397 return sweep_generation_;
1291 } 1398 }
1292 1399
1293 inline Isolate* isolate(); 1400 inline Isolate* isolate();
1294 bool is_safe_to_read_maps() { return is_safe_to_read_maps_; }
1295 1401
1296 void CallGlobalGCPrologueCallback() { 1402 inline void CallGlobalGCPrologueCallback() {
1297 if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_(); 1403 if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_();
1298 } 1404 }
1299 1405
1300 void CallGlobalGCEpilogueCallback() { 1406 inline void CallGlobalGCEpilogueCallback() {
1301 if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_(); 1407 if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_();
1302 } 1408 }
1303 1409
1410 inline bool OldGenerationAllocationLimitReached();
1411
1412 inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
1413 scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
1414 }
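
DoScavengeObject dispatches through the per-map visitor table using the ScavengingCallback typedef from earlier in this file. A trivial callback of the right shape, for illustration only (not a real V8 visitor):

    // A do-nothing ScavengingCallback; a real visitor evacuates `object`
    // (copying it to to-space or promoting it) and rewrites *slot to the
    // object's new address.
    static void NoOpScavenger(Map* map, HeapObject** slot, HeapObject* object) {
      (void) map; (void) slot; (void) object;
    }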
1415
1416 bool ShouldWeGiveBackAPageToTheOS() {
1417 last_empty_page_was_given_back_to_the_os_ =
1418 !last_empty_page_was_given_back_to_the_os_;
1419 return last_empty_page_was_given_back_to_the_os_;
1420 }
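
Because the flag flips on every call, successive calls alternate (starting from a false flag), so roughly every other empty page is handed back:

    ShouldWeGiveBackAPageToTheOS();  // flips flag to true,  returns true
    ShouldWeGiveBackAPageToTheOS();  // flips flag to false, returns false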
1421
1422 void QueueMemoryChunkForFree(MemoryChunk* chunk);
1423 void FreeQueuedChunks();
1424
1425 // Completely clear the Instanceof cache (to stop it keeping objects alive
1426 // around a GC).
1427 inline void CompletelyClearInstanceofCache();
1428
1304 private: 1429 private:
1305 Heap(); 1430 Heap();
1306 1431
1307 // This can be calculated directly from a pointer to the heap; however, it is 1432 // This can be calculated directly from a pointer to the heap; however, it is
1308 // more expedient to get at the isolate directly from within Heap methods. 1433 // more expedient to get at the isolate directly from within Heap methods.
1309 Isolate* isolate_; 1434 Isolate* isolate_;
1310 1435
1436 intptr_t code_range_size_;
1311 int reserved_semispace_size_; 1437 int reserved_semispace_size_;
1312 int max_semispace_size_; 1438 int max_semispace_size_;
1313 int initial_semispace_size_; 1439 int initial_semispace_size_;
1314 intptr_t max_old_generation_size_; 1440 intptr_t max_old_generation_size_;
1315 intptr_t max_executable_size_; 1441 intptr_t max_executable_size_;
1316 intptr_t code_range_size_;
1317 1442
1318 // For keeping track of how much data has survived 1443 // For keeping track of how much data has survived
1319 // scavenge since last new space expansion. 1444 // scavenge since last new space expansion.
1320 int survived_since_last_expansion_; 1445 int survived_since_last_expansion_;
1321 1446
1322 // For keeping track on when to flush RegExp code. 1447 // For keeping track on when to flush RegExp code.
1323 int sweep_generation_; 1448 int sweep_generation_;
1324 1449
1325 int always_allocate_scope_depth_; 1450 int always_allocate_scope_depth_;
1326 int linear_allocation_scope_depth_; 1451 int linear_allocation_scope_depth_;
1327 1452
1328 // For keeping track of context disposals. 1453 // For keeping track of context disposals.
1329 int contexts_disposed_; 1454 int contexts_disposed_;
1330 1455
1456 int scan_on_scavenge_pages_;
1457
1331 #if defined(V8_TARGET_ARCH_X64) 1458 #if defined(V8_TARGET_ARCH_X64)
1332 static const int kMaxObjectSizeInNewSpace = 1024*KB; 1459 static const int kMaxObjectSizeInNewSpace = 1024*KB;
1333 #else 1460 #else
1334 static const int kMaxObjectSizeInNewSpace = 512*KB; 1461 static const int kMaxObjectSizeInNewSpace = 512*KB;
1335 #endif 1462 #endif
1336 1463
1337 NewSpace new_space_; 1464 NewSpace new_space_;
1338 OldSpace* old_pointer_space_; 1465 OldSpace* old_pointer_space_;
1339 OldSpace* old_data_space_; 1466 OldSpace* old_data_space_;
1340 OldSpace* code_space_; 1467 OldSpace* code_space_;
1341 MapSpace* map_space_; 1468 MapSpace* map_space_;
1342 CellSpace* cell_space_; 1469 CellSpace* cell_space_;
1343 LargeObjectSpace* lo_space_; 1470 LargeObjectSpace* lo_space_;
1344 HeapState gc_state_; 1471 HeapState gc_state_;
1345 int gc_post_processing_depth_; 1472 int gc_post_processing_depth_;
1346 1473
1347 // Returns the size of object residing in non new spaces.
1348 intptr_t PromotedSpaceSize();
1349
1350 // Returns the amount of external memory registered since last global gc. 1474 // Returns the amount of external memory registered since last global gc.
1351 int PromotedExternalMemorySize(); 1475 int PromotedExternalMemorySize();
1352 1476
1353 int mc_count_; // how many mark-compact collections happened
1354 int ms_count_; // how many mark-sweep collections happened 1477 int ms_count_; // how many mark-sweep collections happened
1355 unsigned int gc_count_; // how many gc happened 1478 unsigned int gc_count_; // how many gc happened
1356 1479
1357 // Total length of the strings we failed to flatten since the last GC. 1480 // Total length of the strings we failed to flatten since the last GC.
1358 int unflattened_strings_length_; 1481 int unflattened_strings_length_;
1359 1482
1360 #define ROOT_ACCESSOR(type, name, camel_name) \ 1483 #define ROOT_ACCESSOR(type, name, camel_name) \
1361 inline void set_##name(type* value) { \ 1484 inline void set_##name(type* value) { \
1362 roots_[k##camel_name##RootIndex] = value; \ 1485 roots_[k##camel_name##RootIndex] = value; \
1363 } 1486 }
(...skipping 18 matching lines...)
1382 // Limit that triggers a global GC on the next (normally caused) GC. This 1505 // Limit that triggers a global GC on the next (normally caused) GC. This
1383 // is checked when we have already decided to do a GC to help determine 1506 // is checked when we have already decided to do a GC to help determine
1384 // which collector to invoke. 1507 // which collector to invoke.
1385 intptr_t old_gen_promotion_limit_; 1508 intptr_t old_gen_promotion_limit_;
1386 1509
1387 // Limit that triggers a global GC as soon as is reasonable. This is 1510 // Limit that triggers a global GC as soon as is reasonable. This is
1388 // checked before expanding a paged space in the old generation and on 1511 // checked before expanding a paged space in the old generation and on
1389 // every allocation in large object space. 1512 // every allocation in large object space.
1390 intptr_t old_gen_allocation_limit_; 1513 intptr_t old_gen_allocation_limit_;
1391 1514
1515 // Sometimes the heuristics dictate that those limits are increased. This
1516 // variable records that fact.
1517 int old_gen_limit_factor_;
1518
1519 // Used to adjust the limits that control the timing of the next GC.
1520 intptr_t size_of_old_gen_at_last_old_space_gc_;
1521
1392 // Limit on the amount of externally allocated memory allowed 1522 // Limit on the amount of externally allocated memory allowed
1393 // between global GCs. If reached a global GC is forced. 1523 // between global GCs. If reached a global GC is forced.
1394 intptr_t external_allocation_limit_; 1524 intptr_t external_allocation_limit_;
1395 1525
1396 // The amount of external memory registered through the API kept alive 1526 // The amount of external memory registered through the API kept alive
1397 // by global handles 1527 // by global handles
1398 int amount_of_external_allocated_memory_; 1528 int amount_of_external_allocated_memory_;
1399 1529
1400 // Caches the amount of external memory registered at the last global gc. 1530 // Caches the amount of external memory registered at the last global gc.
1401 int amount_of_external_allocated_memory_at_last_global_gc_; 1531 int amount_of_external_allocated_memory_at_last_global_gc_;
1402 1532
1403 // Indicates that an allocation has failed in the old generation since the 1533 // Indicates that an allocation has failed in the old generation since the
1404 // last GC. 1534 // last GC.
1405 int old_gen_exhausted_; 1535 int old_gen_exhausted_;
1406 1536
1407 Object* roots_[kRootListLength]; 1537 Object* roots_[kRootListLength];
1408 1538
1409 Object* global_contexts_list_; 1539 Object* global_contexts_list_;
1410 1540
1541 StoreBufferRebuilder store_buffer_rebuilder_;
1542
1411 struct StringTypeTable { 1543 struct StringTypeTable {
1412 InstanceType type; 1544 InstanceType type;
1413 int size; 1545 int size;
1414 RootListIndex index; 1546 RootListIndex index;
1415 }; 1547 };
1416 1548
1417 struct ConstantSymbolTable { 1549 struct ConstantSymbolTable {
1418 const char* contents; 1550 const char* contents;
1419 RootListIndex index; 1551 RootListIndex index;
1420 }; 1552 };
(...skipping 37 matching lines...)
1458 GCType gc_type; 1590 GCType gc_type;
1459 }; 1591 };
1460 List<GCEpilogueCallbackPair> gc_epilogue_callbacks_; 1592 List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
1461 1593
1462 GCCallback global_gc_prologue_callback_; 1594 GCCallback global_gc_prologue_callback_;
1463 GCCallback global_gc_epilogue_callback_; 1595 GCCallback global_gc_epilogue_callback_;
1464 1596
1465 // Support for computing object sizes during GC. 1597 // Support for computing object sizes during GC.
1466 HeapObjectCallback gc_safe_size_of_old_object_; 1598 HeapObjectCallback gc_safe_size_of_old_object_;
1467 static int GcSafeSizeOfOldObject(HeapObject* object); 1599 static int GcSafeSizeOfOldObject(HeapObject* object);
1468 static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
1469 1600
1470 // Update the GC state. Called from the mark-compact collector. 1601 // Update the GC state. Called from the mark-compact collector.
1471 void MarkMapPointersAsEncoded(bool encoded) { 1602 void MarkMapPointersAsEncoded(bool encoded) {
1472 gc_safe_size_of_old_object_ = encoded 1603 ASSERT(!encoded);
1473 ? &GcSafeSizeOfOldObjectWithEncodedMap 1604 gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
1474 : &GcSafeSizeOfOldObject;
1475 } 1605 }
1476 1606
1477 // Checks whether a global GC is necessary 1607 // Checks whether a global GC is necessary
1478 GarbageCollector SelectGarbageCollector(AllocationSpace space); 1608 GarbageCollector SelectGarbageCollector(AllocationSpace space);
1479 1609
1480 // Performs garbage collection 1610 // Performs garbage collection
1481 // Returns whether there is a chance another major GC could 1611 // Returns whether there is a chance another major GC could
1482 // collect more garbage. 1612 // collect more garbage.
1483 bool PerformGarbageCollection(GarbageCollector collector, 1613 bool PerformGarbageCollection(GarbageCollector collector,
1484 GCTracer* tracer); 1614 GCTracer* tracer);
1485 1615
1486 static const intptr_t kMinimumPromotionLimit = 2 * MB;
1487 static const intptr_t kMinimumAllocationLimit = 8 * MB;
1488 1616
1489 inline void UpdateOldSpaceLimits(); 1617 inline void UpdateOldSpaceLimits();
1490 1618
1619
1491 // Allocate an uninitialized object in map space. The behavior is identical 1620 // Allocate an uninitialized object in map space. The behavior is identical
1492 // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't 1621 // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
1493 // have to test the allocation space argument and (b) can reduce code size 1622 // have to test the allocation space argument and (b) can reduce code size
1494 // (since both AllocateRaw and AllocateRawMap are inlined). 1623 // (since both AllocateRaw and AllocateRawMap are inlined).
1495 MUST_USE_RESULT inline MaybeObject* AllocateRawMap(); 1624 MUST_USE_RESULT inline MaybeObject* AllocateRawMap();
1496 1625
1497 // Allocate an uninitialized object in the global property cell space. 1626 // Allocate an uninitialized object in the global property cell space.
1498 MUST_USE_RESULT inline MaybeObject* AllocateRawCell(); 1627 MUST_USE_RESULT inline MaybeObject* AllocateRawCell();
1499 1628
1500 // Initializes a JSObject based on its map. 1629 // Initializes a JSObject based on its map.
(...skipping 14 matching lines...)
1515 MaybeObject* CreateOddball(const char* to_string, 1644 MaybeObject* CreateOddball(const char* to_string,
1516 Object* to_number, 1645 Object* to_number,
1517 byte kind); 1646 byte kind);
1518 1647
1519 // Allocate empty fixed array. 1648 // Allocate empty fixed array.
1520 MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray(); 1649 MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
1521 1650
1522 // Allocate empty fixed double array. 1651 // Allocate empty fixed double array.
1523 MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray(); 1652 MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
1524 1653
1525 void SwitchScavengingVisitorsTableIfProfilingWasEnabled();
1526
1527 // Performs a minor collection in new generation. 1654 // Performs a minor collection in new generation.
1528 void Scavenge(); 1655 void Scavenge();
1529 1656
1530 static String* UpdateNewSpaceReferenceInExternalStringTableEntry( 1657 static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
1531 Heap* heap, 1658 Heap* heap,
1532 Object** pointer); 1659 Object** pointer);
1533 1660
1534 Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front); 1661 Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
1662 static void ScavengeStoreBufferCallback(Heap* heap,
1663 MemoryChunk* page,
1664 StoreBufferEvent event);
1535 1665
1536 // Performs a major collection in the whole heap. 1666 // Performs a major collection in the whole heap.
1537 void MarkCompact(GCTracer* tracer); 1667 void MarkCompact(GCTracer* tracer);
1538 1668
1539 // Code to be run before and after mark-compact. 1669 // Code to be run before and after mark-compact.
1540 void MarkCompactPrologue(bool is_compacting); 1670 void MarkCompactPrologue();
1541
1542 // Completely clear the Instanceof cache (to stop it keeping objects alive
1543 // around a GC).
1544 inline void CompletelyClearInstanceofCache();
1545 1671
1546 // Record statistics before and after garbage collection. 1672 // Record statistics before and after garbage collection.
1547 void ReportStatisticsBeforeGC(); 1673 void ReportStatisticsBeforeGC();
1548 void ReportStatisticsAfterGC(); 1674 void ReportStatisticsAfterGC();
1549 1675
1550 // Slow part of scavenge object. 1676 // Slow part of scavenge object.
1551 static void ScavengeObjectSlow(HeapObject** p, HeapObject* object); 1677 static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
1552 1678
1553 // Initializes a function with a shared part and prototype. 1679 // Initializes a function with a shared part and prototype.
1554 // Returns the function. 1680 // Returns the function.
(...skipping 59 matching lines...)
1614 } 1740 }
1615 1741
1616 bool IsIncreasingSurvivalTrend() { 1742 bool IsIncreasingSurvivalTrend() {
1617 return survival_rate_trend() == INCREASING; 1743 return survival_rate_trend() == INCREASING;
1618 } 1744 }
1619 1745
1620 bool IsHighSurvivalRate() { 1746 bool IsHighSurvivalRate() {
1621 return high_survival_rate_period_length_ > 0; 1747 return high_survival_rate_period_length_ > 0;
1622 } 1748 }
1623 1749
1750 void SelectScavengingVisitorsTable();
1751
1624 static const int kInitialSymbolTableSize = 2048; 1752 static const int kInitialSymbolTableSize = 2048;
1625 static const int kInitialEvalCacheSize = 64; 1753 static const int kInitialEvalCacheSize = 64;
1626 1754
1627 // Maximum GC pause. 1755 // Maximum GC pause.
1628 int max_gc_pause_; 1756 int max_gc_pause_;
1629 1757
1630 // Maximum size of objects alive after GC. 1758 // Maximum size of objects alive after GC.
1631 intptr_t max_alive_after_gc_; 1759 intptr_t max_alive_after_gc_;
1632 1760
1633 // Minimal interval between two subsequent collections. 1761 // Minimal interval between two subsequent collections.
1634 int min_in_mutator_; 1762 int min_in_mutator_;
1635 1763
1636 // Size of objects alive after last GC. 1764 // Size of objects alive after last GC.
1637 intptr_t alive_after_last_gc_; 1765 intptr_t alive_after_last_gc_;
1638 1766
1639 double last_gc_end_timestamp_; 1767 double last_gc_end_timestamp_;
1640 1768
1641 MarkCompactCollector mark_compact_collector_; 1769 MarkCompactCollector mark_compact_collector_;
1642 1770
1643 // This field contains the meaning of the WATERMARK_INVALIDATED flag. 1771 StoreBuffer store_buffer_;
1644 // Instead of clearing this flag from all pages we just flip 1772
1645 // its meaning at the beginning of a scavenge. 1773 Marking marking_;
1646 intptr_t page_watermark_invalidated_mark_; 1774
1775 IncrementalMarking incremental_marking_;
1647 1776
1648 int number_idle_notifications_; 1777 int number_idle_notifications_;
1649 unsigned int last_idle_notification_gc_count_; 1778 unsigned int last_idle_notification_gc_count_;
1650 bool last_idle_notification_gc_count_init_; 1779 bool last_idle_notification_gc_count_init_;
1651 1780
1652 // Shared state read by the scavenge collector and set by ScavengeObject. 1781 // Shared state read by the scavenge collector and set by ScavengeObject.
1653 PromotionQueue promotion_queue_; 1782 PromotionQueue promotion_queue_;
1654 1783
1655 // Flag is set when the heap has been configured. The heap can be repeatedly 1784 // Flag is set when the heap has been configured. The heap can be repeatedly
1656 // configured through the API until it is setup. 1785 // configured through the API until it is setup.
1657 bool configured_; 1786 bool configured_;
1658 1787
1659 ExternalStringTable external_string_table_; 1788 ExternalStringTable external_string_table_;
1660 1789
1661 bool is_safe_to_read_maps_; 1790 VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
1791
1792 bool last_empty_page_was_given_back_to_the_os_;
1793 MemoryChunk* chunks_queued_for_free_;
1662 1794
1663 friend class Factory; 1795 friend class Factory;
1664 friend class GCTracer; 1796 friend class GCTracer;
1665 friend class DisallowAllocationFailure; 1797 friend class DisallowAllocationFailure;
1666 friend class AlwaysAllocateScope; 1798 friend class AlwaysAllocateScope;
1667 friend class LinearAllocationScope; 1799 friend class LinearAllocationScope;
1668 friend class Page; 1800 friend class Page;
1669 friend class Isolate; 1801 friend class Isolate;
1670 friend class MarkCompactCollector; 1802 friend class MarkCompactCollector;
1671 friend class StaticMarkingVisitor; 1803 friend class StaticMarkingVisitor;
(...skipping 78 matching lines...)
1750 void VisitPointers(Object** start, Object** end) { 1882 void VisitPointers(Object** start, Object** end) {
1751 for (Object** current = start; current < end; current++) { 1883 for (Object** current = start; current < end; current++) {
1752 if ((*current)->IsHeapObject()) { 1884 if ((*current)->IsHeapObject()) {
1753 HeapObject* object = HeapObject::cast(*current); 1885 HeapObject* object = HeapObject::cast(*current);
1754 ASSERT(HEAP->Contains(object)); 1886 ASSERT(HEAP->Contains(object));
1755 ASSERT(object->map()->IsMap()); 1887 ASSERT(object->map()->IsMap());
1756 } 1888 }
1757 } 1889 }
1758 } 1890 }
1759 }; 1891 };
1760
1761
1762 // Visitor class to verify interior pointers in spaces that use region marks
1763 // to keep track of intergenerational references.
1764 // As VerifyPointersVisitor but also checks that dirty marks are set
1765 // for regions covering intergenerational references.
1766 class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
1767 public:
1768 void VisitPointers(Object** start, Object** end) {
1769 for (Object** current = start; current < end; current++) {
1770 if ((*current)->IsHeapObject()) {
1771 HeapObject* object = HeapObject::cast(*current);
1772 ASSERT(HEAP->Contains(object));
1773 ASSERT(object->map()->IsMap());
1774 if (HEAP->InNewSpace(object)) {
1775 ASSERT(HEAP->InToSpace(object));
1776 Address addr = reinterpret_cast<Address>(current);
1777 ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
1778 }
1779 }
1780 }
1781 }
1782 };
1783 #endif 1892 #endif
1784 1893
1785 1894
1786 // Space iterator for iterating over all spaces of the heap. 1895 // Space iterator for iterating over all spaces of the heap.
1787 // Returns each space in turn, and null when it is done. 1896 // Returns each space in turn, and null when it is done.
1788 class AllSpaces BASE_EMBEDDED { 1897 class AllSpaces BASE_EMBEDDED {
1789 public: 1898 public:
1790 Space* next(); 1899 Space* next();
1791 AllSpaces() { counter_ = FIRST_SPACE; } 1900 AllSpaces() { counter_ = FIRST_SPACE; }
1792 private: 1901 private:
(...skipping 312 matching lines...)
2105 2214
2106 // Sets the collector. 2215 // Sets the collector.
2107 void set_collector(GarbageCollector collector) { collector_ = collector; } 2216 void set_collector(GarbageCollector collector) { collector_ = collector; }
2108 2217
2109 // Sets the GC count. 2218 // Sets the GC count.
2110 void set_gc_count(unsigned int count) { gc_count_ = count; } 2219 void set_gc_count(unsigned int count) { gc_count_ = count; }
2111 2220
2112 // Sets the full GC count. 2221 // Sets the full GC count.
2113 void set_full_gc_count(int count) { full_gc_count_ = count; } 2222 void set_full_gc_count(int count) { full_gc_count_ = count; }
2114 2223
2115 // Sets the flag that this is a compacting full GC.
2116 void set_is_compacting() { is_compacting_ = true; }
2117 bool is_compacting() const { return is_compacting_; }
2118
2119 // Increment and decrement the count of marked objects.
2120 void increment_marked_count() { ++marked_count_; }
2121 void decrement_marked_count() { --marked_count_; }
2122
2123 int marked_count() { return marked_count_; }
2124
2125 void increment_promoted_objects_size(int object_size) { 2224 void increment_promoted_objects_size(int object_size) {
2126 promoted_objects_size_ += object_size; 2225 promoted_objects_size_ += object_size;
2127 } 2226 }
2128 2227
2129 private: 2228 private:
2130 // Returns a string matching the collector. 2229 // Returns a string matching the collector.
2131 const char* CollectorString(); 2230 const char* CollectorString();
2132 2231
2133 // Returns size of object in heap (in MB). 2232 // Returns size of object in heap (in MB).
2134 double SizeOfHeapObjects() { 2233 double SizeOfHeapObjects() {
2135 return (static_cast<double>(HEAP->SizeOfObjects())) / MB; 2234 return (static_cast<double>(HEAP->SizeOfObjects())) / MB;
2136 } 2235 }
2137 2236
2138 double start_time_; // Timestamp set in the constructor. 2237 double start_time_; // Timestamp set in the constructor.
2139 intptr_t start_size_; // Size of objects in heap set in constructor. 2238 intptr_t start_size_; // Size of objects in heap set in constructor.
2140 GarbageCollector collector_; // Type of collector. 2239 GarbageCollector collector_; // Type of collector.
2141 2240
2142 // A count (including this one, e.g., the first collection is 1) of the 2241 // A count (including this one, e.g., the first collection is 1) of the
2143 // number of garbage collections. 2242 // number of garbage collections.
2144 unsigned int gc_count_; 2243 unsigned int gc_count_;
2145 2244
2146 // A count (including this one) of the number of full garbage collections. 2245 // A count (including this one) of the number of full garbage collections.
2147 int full_gc_count_; 2246 int full_gc_count_;
2148 2247
2149 // True if the current GC is a compacting full collection, false
2150 // otherwise.
2151 bool is_compacting_;
2152
2153 // True if the *previous* full GC was a compacting collection (will be
2154 // false if there has not been a previous full GC).
2155 bool previous_has_compacted_;
2156
2157 // On a full GC, a count of the number of marked objects. Incremented
2158 // when an object is marked and decremented when an object's mark bit is
2159 // cleared. Will be zero on a scavenge collection.
2160 int marked_count_;
2161
2162 // The count from the end of the previous full GC. Will be zero if there
2163 // was no previous full GC.
2164 int previous_marked_count_;
2165
2166 // Amounts of time spent in different scopes during GC. 2248 // Amounts of time spent in different scopes during GC.
2167 double scopes_[Scope::kNumberOfScopes]; 2249 double scopes_[Scope::kNumberOfScopes];
2168 2250
2169 // Total amount of space either wasted or contained in one of free lists 2251 // Total amount of space either wasted or contained in one of free lists
2170 // before the current GC. 2252 // before the current GC.
2171 intptr_t in_free_list_or_wasted_before_gc_; 2253 intptr_t in_free_list_or_wasted_before_gc_;
2172 2254
2173 // Difference between space used in the heap at the beginning of the current 2255 // Difference between space used in the heap at the beginning of the current
2174 // collection and the end of the previous collection. 2256 // collection and the end of the previous collection.
2175 intptr_t allocated_since_last_gc_; 2257 intptr_t allocated_since_last_gc_;
2176 2258
2177 // Amount of time spent in mutator that is time elapsed between end of the 2259 // Amount of time spent in mutator that is time elapsed between end of the
2178 // previous collection and the beginning of the current one. 2260 // previous collection and the beginning of the current one.
2179 double spent_in_mutator_; 2261 double spent_in_mutator_;
2180 2262
2181 // Size of objects promoted during the current collection. 2263 // Size of objects promoted during the current collection.
2182 intptr_t promoted_objects_size_; 2264 intptr_t promoted_objects_size_;
2183 2265
2266 // Incremental marking steps counters.
2267 int steps_count_;
2268 double steps_took_;
2269 double longest_step_;
2270 int steps_count_since_last_gc_;
2271 double steps_took_since_last_gc_;
2272
2184 Heap* heap_; 2273 Heap* heap_;
2185 }; 2274 };
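The scopes_ array is the natural target of an RAII phase timer; the real scope helper referenced by scopes_[Scope::kNumberOfScopes] lives in the elided lines, so the class below is only a sketch of the usual pattern, using OS::TimeCurrentMillis() from platform.h:

    // Hypothetical RAII timer in the style of a GC phase scope: it
    // accumulates the elapsed wall-clock milliseconds for one phase
    // into the corresponding scopes_ slot when it goes out of scope.
    class PhaseTimer {
     public:
      explicit PhaseTimer(double* slot)
          : slot_(slot), start_(OS::TimeCurrentMillis()) {}
      ~PhaseTimer() { *slot_ += OS::TimeCurrentMillis() - start_; }
     private:
      double* slot_;
      double start_;
    };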
2186 2275
2187 2276
2188 class StringSplitCache { 2277 class StringSplitCache {
2189 public: 2278 public:
2190 static Object* Lookup(FixedArray* cache, String* string, String* pattern); 2279 static Object* Lookup(FixedArray* cache, String* string, String* pattern);
2191 static void Enter(Heap* heap, 2280 static void Enter(Heap* heap,
2192 FixedArray* cache, 2281 FixedArray* cache,
2193 String* string, 2282 String* string,
(...skipping 91 matching lines...)
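The remainder of Enter's parameter list falls in the elided lines; assuming it ends with the FixedArray of split results, a hypothetical call site would consult the cache before recomputing a split:

    // ComputeSplit is a hypothetical helper; the trailing 'parts'
    // argument to Enter is an assumption, not confirmed by this diff.
    Object* cached = StringSplitCache::Lookup(cache, subject, pattern);
    if (cached->IsUndefined()) {
      FixedArray* parts = ComputeSplit(subject, pattern);
      StringSplitCache::Enter(heap, cache, subject, pattern, parts);
    }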
2285 public: 2374 public:
2286 virtual ~WeakObjectRetainer() {} 2375 virtual ~WeakObjectRetainer() {}
2287 2376
2288 // Return whether this object should be retained. If NULL is returned, the 2377 // Return whether this object should be retained. If NULL is returned, the
2289 // object has no references. Otherwise, the address of the retained object 2378 // object has no references. Otherwise, the address of the retained object
2290 // should be returned, as in some GC situations the object may have moved. 2379 // should be returned, as in some GC situations the object may have moved.
2291 virtual Object* RetainAs(Object* object) = 0; 2380 virtual Object* RetainAs(Object* object) = 0;
2292 }; 2381 };
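A minimal sketch of a concrete retainer illustrating the contract above (the class name is hypothetical):

    // Retain every object, returning its (possibly relocated) address.
    class KeepAliveRetainer : public WeakObjectRetainer {
     public:
      virtual Object* RetainAs(Object* object) { return object; }
    };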
2293 2382
2294 2383
2384 // Intrusive object marking uses the least significant bit of a
2385 // heap object's map word to mark objects.
2386 // Normally all map words have the least significant bit set
2387 // because they contain a tagged map pointer.
2388 // If the bit is not set, the object is marked.
2389 // All objects should be unmarked before resuming
2390 // JavaScript execution.
2391 class IntrusiveMarking {
2392 public:
2393 static bool IsMarked(HeapObject* object) {
2394 return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
2395 }
2396
2397 static void ClearMark(HeapObject* object) {
2398 uintptr_t map_word = object->map_word().ToRawValue();
2399 object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
2400 ASSERT(!IsMarked(object));
2401 }
2402
2403 static void SetMark(HeapObject* object) {
2404 uintptr_t map_word = object->map_word().ToRawValue();
2405 object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
2406 ASSERT(IsMarked(object));
2407 }
2408
2409 static Map* MapOfMarkedObject(HeapObject* object) {
2410 uintptr_t map_word = object->map_word().ToRawValue();
2411 return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
2412 }
2413
2414 static int SizeOfMarkedObject(HeapObject* object) {
2415 return object->SizeFromMap(MapOfMarkedObject(object));
2416 }
2417
2418 private:
2419 static const uintptr_t kNotMarkedBit = 0x1;
2420 STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);
2421 };
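Because tagged map pointers always carry the low bit set (the STATIC_ASSERT above ties kNotMarkedBit to kHeapObjectTag), clearing it is an unambiguous and reversible mark. A usage sketch, assuming object is a live HeapObject*:

    IntrusiveMarking::SetMark(object);        // clears the low map-word bit
    ASSERT(IntrusiveMarking::IsMarked(object));
    // The map, and hence the size, stays recoverable while marked.
    int size = IntrusiveMarking::SizeOfMarkedObject(object);
    IntrusiveMarking::ClearMark(object);      // restore before JS resumes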
2422
2423
2295 #if defined(DEBUG) || defined(LIVE_OBJECT_LIST) 2424 #if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
2296 // Helper class for tracing paths to a search target Object from all roots. 2425 // Helper class for tracing paths to a search target Object from all roots.
2297 // The TracePathFrom() method can be used to trace paths from a specific 2426 // The TracePathFrom() method can be used to trace paths from a specific
2298 // object to the search target object. 2427 // object to the search target object.
2299 class PathTracer : public ObjectVisitor { 2428 class PathTracer : public ObjectVisitor {
2300 public: 2429 public:
2301 enum WhatToFind { 2430 enum WhatToFind {
2302 FIND_ALL, // Will find all matches. 2431 FIND_ALL, // Will find all matches.
2303 FIND_FIRST // Will stop the search after first match. 2432 FIND_FIRST // Will stop the search after first match.
2304 }; 2433 };
(...skipping 38 matching lines...)
2343 WhatToFind what_to_find_; 2472 WhatToFind what_to_find_;
2344 VisitMode visit_mode_; 2473 VisitMode visit_mode_;
2345 List<Object*> object_stack_; 2474 List<Object*> object_stack_;
2346 2475
2347 AssertNoAllocation no_alloc; // i.e. no gc allowed. 2476 AssertNoAllocation no_alloc; // i.e. no gc allowed.
2348 2477
2349 DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer); 2478 DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
2350 }; 2479 };
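A hedged sketch of driving the tracer from a debugging session; the constructor arguments (search target, what to find, visit mode) are inferred from the member fields above, since the public interface falls in the elided lines:

    // Hypothetical: report the first retaining path from the roots to target.
    PathTracer tracer(target, PathTracer::FIND_FIRST, VISIT_ALL);
    HEAP->IterateRoots(&tracer, VISIT_ALL);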
2351 #endif // DEBUG || LIVE_OBJECT_LIST 2480 #endif // DEBUG || LIVE_OBJECT_LIST
2352 2481
2353
2354 } } // namespace v8::internal 2482 } } // namespace v8::internal
2355 2483
2356 #undef HEAP 2484 #undef HEAP
2357 2485
2358 #endif // V8_HEAP_H_ 2486 #endif // V8_HEAP_H_