OLD | NEW |
1 // Copyright 2009 the V8 project authors. All rights reserved. | 1 // Copyright 2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 31 matching lines...)
42 #include "snapshot.h" | 42 #include "snapshot.h" |
43 #include "v8threads.h" | 43 #include "v8threads.h" |
44 #if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP | 44 #if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP |
45 #include "regexp-macro-assembler.h" | 45 #include "regexp-macro-assembler.h" |
46 #include "arm/regexp-macro-assembler-arm.h" | 46 #include "arm/regexp-macro-assembler-arm.h" |
47 #endif | 47 #endif |
48 | 48 |
49 namespace v8 { | 49 namespace v8 { |
50 namespace internal { | 50 namespace internal { |
51 | 51 |
52 | 52 // A queue of pointers and maps of to-be-promoted objects during a |
53 String* Heap::hidden_symbol_; | 53 // scavenge collection. |
54 Object* Heap::roots_[Heap::kRootListLength]; | 54 class PromotionQueue { |
55 | 55 public: |
56 | 56 void Initialize(Address start_address) { |
57 NewSpace Heap::new_space_; | 57 front_ = rear_ = reinterpret_cast<HeapObject**>(start_address); |
58 OldSpace* Heap::old_pointer_space_ = NULL; | 58 } |
59 OldSpace* Heap::old_data_space_ = NULL; | 59 |
60 OldSpace* Heap::code_space_ = NULL; | 60 bool is_empty() { return front_ <= rear_; } |
61 MapSpace* Heap::map_space_ = NULL; | 61 |
62 CellSpace* Heap::cell_space_ = NULL; | 62 void insert(HeapObject* object, Map* map) { |
63 LargeObjectSpace* Heap::lo_space_ = NULL; | 63 *(--rear_) = object; |
| 64 *(--rear_) = map; |
| 65 // Assert no overflow into live objects. |
| 66 ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top()); |
| 67 } |
| 68 |
| 69 void remove(HeapObject** object, Map** map) { |
| 70 *object = *(--front_); |
| 71 *map = Map::cast(*(--front_)); |
| 72 // Assert no underflow. |
| 73 ASSERT(front_ >= rear_); |
| 74 } |
| 75 |
| 76 private: |
| 77 // The front of the queue is higher in memory than the rear. |
| 78 HeapObject** front_; |
| 79 HeapObject** rear_; |
| 80 }; |
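
Note on the queue layout: each entry occupies two machine words, written downward from the top of to-space (object pointer first, then map), and remove reads them back in the same order from the higher front_ pointer. A self-contained sketch of the same layout, with plain integer pointers standing in for HeapObject/Map (the names and buffer size here are illustrative, not from this CL):

    #include <cassert>
    #include <cstdint>

    struct TwoWordQueue {  // mirrors PromotionQueue above
      void Initialize(intptr_t** start) { front_ = rear_ = start; }
      bool is_empty() const { return front_ <= rear_; }
      void insert(intptr_t* object, intptr_t* map) {
        *(--rear_) = object;  // grows downward, object word first
        *(--rear_) = map;
      }
      void remove(intptr_t** object, intptr_t** map) {
        *object = *(--front_);  // front_ trails above rear_
        *map = *(--front_);
        assert(front_ >= rear_);  // no underflow
      }
      intptr_t** front_;  // higher in memory than rear_
      intptr_t** rear_;
    };

    int main() {
      intptr_t* buffer[8];
      intptr_t obj = 1, map = 2;
      TwoWordQueue q;
      q.Initialize(buffer + 8);  // start at the high end
      q.insert(&obj, &map);
      intptr_t* o;
      intptr_t* m;
      q.remove(&o, &m);
      assert(o == &obj && m == &map && q.is_empty());
      return 0;
    }
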
| 81 |
| 82 class HeapPrivateData { |
| 83 public: |
| 84 // Shared state read by the scavenge collector and set by ScavengeObject. |
| 85 PromotionQueue promotion_queue_; |
| 86 int number_idle_notifications_; |
| 87 int last_gc_count_; |
| 88 void* paged_rset_histogram_; |
| 89 |
| 90 #ifdef DEBUG |
| 91 bool search_for_any_global_; |
| 92 Object* search_target_; |
| 93 bool found_target_; |
| 94 List<Object*> object_stack_; |
| 95 |
| 96 void MarkRootObjectRecursively(Object** root); |
| 97 #endif |
| 98 |
 | 99 HeapPrivateData() |
 | 100 : number_idle_notifications_(0), |
 | 101 last_gc_count_(0), |
 | 102 #ifdef DEBUG |
 | 103 search_for_any_global_(false), |
 | 104 search_target_(NULL), |
 | 105 found_target_(false), |
 | 106 object_stack_(20), |
 | 107 #endif |
 | 108 paged_rset_histogram_(NULL) { |
| 109 } |
| 110 |
| 111 DISALLOW_COPY_AND_ASSIGN(HeapPrivateData); |
| 112 }; |
64 | 113 |
65 static const int kMinimumPromotionLimit = 2*MB; | 114 static const int kMinimumPromotionLimit = 2*MB; |
66 static const int kMinimumAllocationLimit = 8*MB; | 115 static const int kMinimumAllocationLimit = 8*MB; |
67 | 116 |
68 int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit; | |
69 int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit; | |
70 | |
71 int Heap::old_gen_exhausted_ = false; | |
72 | |
73 int Heap::amount_of_external_allocated_memory_ = 0; | |
74 int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0; | |
75 | |
76 // semispace_size_ should be a power of 2 and old_generation_size_ should be | 117 // semispace_size_ should be a power of 2 and old_generation_size_ should be |
77 // a multiple of Page::kPageSize. | 118 // a multiple of Page::kPageSize. |
78 #if defined(ANDROID) | 119 #if defined(ANDROID) |
79 int Heap::max_semispace_size_ = 512*KB; | 120 static const int kMaxSemispaceSize = 512*KB; |
80 int Heap::max_old_generation_size_ = 128*MB; | 121 static const int kMaxOldGenerationSize = 128*MB; |
81 int Heap::initial_semispace_size_ = 128*KB; | 122 static const int kInitialSemispaceSize = 128*KB; |
82 size_t Heap::code_range_size_ = 0; | 123 static const size_t kCodeRangeSize = 0; |
83 #elif defined(V8_TARGET_ARCH_X64) | 124 #elif defined(V8_TARGET_ARCH_X64) |
84 int Heap::max_semispace_size_ = 16*MB; | 125 static const int kMaxSemispaceSize = 16*MB; |
85 int Heap::max_old_generation_size_ = 1*GB; | 126 static const int kMaxOldGenerationSize = 1*GB; |
86 int Heap::initial_semispace_size_ = 1*MB; | 127 static const int kInitialSemispaceSize = 1*MB; |
87 size_t Heap::code_range_size_ = 512*MB; | 128 static const size_t kCodeRangeSize = 512*MB; |
88 #else | 129 #else |
89 int Heap::max_semispace_size_ = 8*MB; | 130 static const int kMaxSemispaceSize = 8*MB; |
90 int Heap::max_old_generation_size_ = 512*MB; | 131 static const int kMaxOldGenerationSize = 512*MB; |
91 int Heap::initial_semispace_size_ = 512*KB; | 132 static const int kInitialSemispaceSize = 512*KB; |
92 size_t Heap::code_range_size_ = 0; | 133 static const size_t kCodeRangeSize = 0; |
93 #endif | 134 #endif |
94 | 135 |
95 // The snapshot semispace size will be the default semispace size if | 136 HeapData::HeapData() |
96 // snapshotting is used and will be the requested semispace size as | 137 : hidden_symbol_(NULL), |
97 // set up by ConfigureHeap otherwise. | 138 old_pointer_space_(NULL), |
98 int Heap::reserved_semispace_size_ = Heap::max_semispace_size_; | 139 old_data_space_(NULL), |
99 | 140 code_space_(NULL), |
100 GCCallback Heap::global_gc_prologue_callback_ = NULL; | 141 map_space_(NULL), |
101 GCCallback Heap::global_gc_epilogue_callback_ = NULL; | 142 cell_space_(NULL), |
102 | 143 lo_space_(NULL), |
103 // Variables set based on semispace_size_ and old_generation_size_ in | 144 global_gc_prologue_callback_(NULL), |
104 // ConfigureHeap. | 145 global_gc_epilogue_callback_(NULL), |
105 | 146 old_gen_promotion_limit_(kMinimumPromotionLimit), |
106 // Will be 4 * reserved_semispace_size_ to ensure that young | 147 old_gen_allocation_limit_(kMinimumAllocationLimit), |
107 // generation can be aligned to its size. | 148 old_gen_exhausted_(false), |
108 int Heap::survived_since_last_expansion_ = 0; | 149 amount_of_external_allocated_memory_(0), |
109 int Heap::external_allocation_limit_ = 0; | 150 amount_of_external_allocated_memory_at_last_global_gc_(0), |
110 | 151 heap_private_data_(*new HeapPrivateData()), |
111 Heap::HeapState Heap::gc_state_ = NOT_IN_GC; | 152 |
112 | 153 max_semispace_size_(kMaxSemispaceSize), |
113 int Heap::mc_count_ = 0; | 154 max_old_generation_size_(kMaxOldGenerationSize), |
114 int Heap::gc_count_ = 0; | 155 initial_semispace_size_(kInitialSemispaceSize), |
115 | 156 code_range_size_(kCodeRangeSize), |
116 int Heap::always_allocate_scope_depth_ = 0; | 157 |
117 int Heap::linear_allocation_scope_depth_ = 0; | 158 // The snapshot semispace size will be the default semispace size if |
118 bool Heap::context_disposed_pending_ = false; | 159 // snapshotting is used and will be the requested semispace size as |
| 160 // set up by ConfigureHeap otherwise. |
| 161 reserved_semispace_size_(kMaxSemispaceSize), |
| 162 |
| 163 // Variables set based on semispace_size_ and old_generation_size_ in |
| 164 // ConfigureHeap. |
| 165 |
| 166 // Will be 4 * reserved_semispace_size_ to ensure that young |
| 167 // generation can be aligned to its size. |
| 168 |
| 169 survived_since_last_expansion_(0), |
| 170 external_allocation_limit_(0), |
| 171 gc_state_(NOT_IN_GC), |
| 172 mc_count_(0), |
| 173 gc_count_(0), |
| 174 always_allocate_scope_depth_(0), |
| 175 linear_allocation_scope_depth_(0), |
| 176 context_disposed_pending_(false), |
119 | 177 |
120 #ifdef DEBUG | 178 #ifdef DEBUG |
121 bool Heap::allocation_allowed_ = true; | 179 allocation_allowed_(true), |
122 | 180 allocation_timeout_(0), |
123 int Heap::allocation_timeout_ = 0; | 181 disallow_allocation_failure_(false), |
124 bool Heap::disallow_allocation_failure_ = false; | |
125 #endif // DEBUG | 182 #endif // DEBUG |
126 | 183 heap_configured_(false) { |
| 184 for (int i = 0; i < kRootListLength; ++i) { |
| 185 roots_[i] = NULL; |
| 186 } |
| 187 } |
| 188 |
| 189 HeapData::~HeapData() { |
| 190 delete &heap_private_data_; |
| 191 } |
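
The heap_private_data_(*new HeapPrivateData()) / delete &heap_private_data_ pair is an owned-reference variant of the pimpl idiom: the scavenge-private state stays out of the header, and using a reference rather than a pointer documents that it can never be NULL. A minimal sketch of the pattern, with hypothetical names:

    class Impl;  // defined only in the .cc file

    class Facade {
     public:
      Facade();
      ~Facade();
     private:
      Impl& impl_;  // owned reference to hidden state
      Facade(const Facade&);           // pre-C++11 copy suppression,
      void operator=(const Facade&);   // as DISALLOW_COPY_AND_ASSIGN does
    };

    // In the .cc file:
    class Impl { /* private members */ };
    Facade::Facade() : impl_(*new Impl()) {}
    Facade::~Facade() { delete &impl_; }  // reclaims the owned reference
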
127 | 192 |
128 int Heap::Capacity() { | 193 int Heap::Capacity() { |
129 if (!HasBeenSetup()) return 0; | 194 if (!HasBeenSetup()) return 0; |
130 | 195 HeapData& heap_data = v8_context()->heap_data_; |
131 return new_space_.Capacity() + | 196 return heap_data.new_space_.Capacity() + |
132 old_pointer_space_->Capacity() + | 197 heap_data.old_pointer_space_->Capacity() + |
133 old_data_space_->Capacity() + | 198 heap_data.old_data_space_->Capacity() + |
134 code_space_->Capacity() + | 199 heap_data.code_space_->Capacity() + |
135 map_space_->Capacity() + | 200 heap_data.map_space_->Capacity() + |
136 cell_space_->Capacity(); | 201 heap_data.cell_space_->Capacity(); |
137 } | 202 } |
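
From here on, every accessor fetches its state through v8_context()->heap_data_ instead of file-level statics, which is the point of this CL: heap state becomes per-context rather than process-global. The shape of the accessor is not shown in this file; a plausible sketch (an assumption, not the actual definition) is:

    // Hypothetical shape of the per-context lookup used throughout:
    struct V8Context {
      HeapData heap_data_;
      // ... other formerly-static subsystem state ...
    };

    // Presumably backed by thread-local storage or similar.
    V8Context* v8_context();
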
138 | 203 |
139 | 204 |
140 int Heap::CommittedMemory() { | 205 int Heap::CommittedMemory() { |
141 if (!HasBeenSetup()) return 0; | 206 if (!HasBeenSetup()) return 0; |
142 | 207 |
143 return new_space_.CommittedMemory() + | 208 HeapData& heap_data = v8_context()->heap_data_; |
144 old_pointer_space_->CommittedMemory() + | 209 return heap_data.new_space_.CommittedMemory() + |
145 old_data_space_->CommittedMemory() + | 210 heap_data.old_pointer_space_->CommittedMemory() + |
146 code_space_->CommittedMemory() + | 211 heap_data.old_data_space_->CommittedMemory() + |
147 map_space_->CommittedMemory() + | 212 heap_data.code_space_->CommittedMemory() + |
148 cell_space_->CommittedMemory() + | 213 heap_data.map_space_->CommittedMemory() + |
149 lo_space_->Size(); | 214 heap_data.cell_space_->CommittedMemory() + |
| 215 heap_data.lo_space_->Size(); |
150 } | 216 } |
151 | 217 |
152 | 218 |
153 int Heap::Available() { | 219 int Heap::Available() { |
154 if (!HasBeenSetup()) return 0; | 220 if (!HasBeenSetup()) return 0; |
155 | 221 |
156 return new_space_.Available() + | 222 HeapData& heap_data = v8_context()->heap_data_; |
157 old_pointer_space_->Available() + | 223 return heap_data.new_space_.Available() + |
158 old_data_space_->Available() + | 224 heap_data.old_pointer_space_->Available() + |
159 code_space_->Available() + | 225 heap_data.old_data_space_->Available() + |
160 map_space_->Available() + | 226 heap_data.code_space_->Available() + |
161 cell_space_->Available(); | 227 heap_data.map_space_->Available() + |
| 228 heap_data.cell_space_->Available(); |
162 } | 229 } |
163 | 230 |
164 | 231 |
165 bool Heap::HasBeenSetup() { | 232 bool Heap::HasBeenSetup() { |
166 return old_pointer_space_ != NULL && | 233 HeapData& heap_data = v8_context()->heap_data_; |
167 old_data_space_ != NULL && | 234 return heap_data.old_pointer_space_ != NULL && |
168 code_space_ != NULL && | 235 heap_data.old_data_space_ != NULL && |
169 map_space_ != NULL && | 236 heap_data.code_space_ != NULL && |
170 cell_space_ != NULL && | 237 heap_data.map_space_ != NULL && |
171 lo_space_ != NULL; | 238 heap_data.cell_space_ != NULL && |
| 239 heap_data.lo_space_ != NULL; |
172 } | 240 } |
173 | 241 |
174 | 242 |
175 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) { | 243 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) { |
176 // Is global GC requested? | 244 // Is global GC requested? |
177 if (space != NEW_SPACE || FLAG_gc_global) { | 245 if (space != NEW_SPACE || FLAG_gc_global) { |
178 Counters::gc_compactor_caused_by_request.Increment(); | 246 INC_COUNTER(gc_compactor_caused_by_request); |
179 return MARK_COMPACTOR; | 247 return MARK_COMPACTOR; |
180 } | 248 } |
181 | 249 |
182 // Is enough data promoted to justify a global GC? | 250 // Is enough data promoted to justify a global GC? |
183 if (OldGenerationPromotionLimitReached()) { | 251 if (OldGenerationPromotionLimitReached()) { |
184 Counters::gc_compactor_caused_by_promoted_data.Increment(); | 252 INC_COUNTER(gc_compactor_caused_by_promoted_data); |
185 return MARK_COMPACTOR; | 253 return MARK_COMPACTOR; |
186 } | 254 } |
187 | 255 |
188 // Have allocation in OLD and LO failed? | 256 // Have allocation in OLD and LO failed? |
189 if (old_gen_exhausted_) { | 257 if (v8_context()->heap_data_.old_gen_exhausted_) { |
190 Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment(); | 258 INC_COUNTER(gc_compactor_caused_by_oldspace_exhaustion); |
191 return MARK_COMPACTOR; | 259 return MARK_COMPACTOR; |
192 } | 260 } |
193 | 261 |
194 // Is there enough space left in OLD to guarantee that a scavenge can | 262 // Is there enough space left in OLD to guarantee that a scavenge can |
195 // succeed? | 263 // succeed? |
196 // | 264 // |
197 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available | 265 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available |
198 // for object promotion. It counts only the bytes that the memory | 266 // for object promotion. It counts only the bytes that the memory |
199 // allocator has not yet allocated from the OS and assigned to any space, | 267 // allocator has not yet allocated from the OS and assigned to any space, |
200 // and does not count available bytes already in the old space or code | 268 // and does not count available bytes already in the old space or code |
201 // space. Undercounting is safe---we may get an unrequested full GC when | 269 // space. Undercounting is safe---we may get an unrequested full GC when |
202 // a scavenge would have succeeded. | 270 // a scavenge would have succeeded. |
203 if (MemoryAllocator::MaxAvailable() <= new_space_.Size()) { | 271 if (MemoryAllocator::MaxAvailable() <= |
204 Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment(); | 272 v8_context()->heap_data_.new_space_.Size() |
| 273 ) { |
| 274 INC_COUNTER(gc_compactor_caused_by_oldspace_exhaustion); |
205 return MARK_COMPACTOR; | 275 return MARK_COMPACTOR; |
206 } | 276 } |
207 | 277 |
208 // Default | 278 // Default |
209 return SCAVENGER; | 279 return SCAVENGER; |
210 } | 280 } |
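
INC_COUNTER/COUNTER replace the direct Counters::foo accesses shown on the old side. Their definitions are not part of this section; a plausible expansion, consistent with the per-context refactoring (purely an assumption), would be:

    // Hypothetical expansions; the real macros may differ.
    #define COUNTER(name) (v8_context()->counters_.name)
    #define INC_COUNTER(name) COUNTER(name).Increment()
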
211 | 281 |
212 | 282 |
213 // TODO(1238405): Combine the infrastructure for --heap-stats and | 283 // TODO(1238405): Combine the infrastructure for --heap-stats and |
214 // --log-gc to avoid the complicated preprocessor and flag testing. | 284 // --log-gc to avoid the complicated preprocessor and flag testing. |
215 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 285 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
216 void Heap::ReportStatisticsBeforeGC() { | 286 void Heap::ReportStatisticsBeforeGC() { |
| 287 HeapData& heap_data = v8_context()->heap_data_; |
217 // Heap::ReportHeapStatistics will also log NewSpace statistics when | 288 // Heap::ReportHeapStatistics will also log NewSpace statistics when |
218 // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set. The | 289 // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set. The |
219 // following logic is used to avoid double logging. | 290 // following logic is used to avoid double logging. |
220 #if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING) | 291 #if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING) |
221 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics(); | 292 if (FLAG_heap_stats || FLAG_log_gc) heap_data.new_space_.CollectStatistics(); |
222 if (FLAG_heap_stats) { | 293 if (FLAG_heap_stats) { |
223 ReportHeapStatistics("Before GC"); | 294 ReportHeapStatistics("Before GC"); |
224 } else if (FLAG_log_gc) { | 295 } else if (FLAG_log_gc) { |
225 new_space_.ReportStatistics(); | 296 heap_data.new_space_.ReportStatistics(); |
226 } | 297 } |
227 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms(); | 298 if (FLAG_heap_stats || FLAG_log_gc) heap_data.new_space_.ClearHistograms(); |
228 #elif defined(DEBUG) | 299 #elif defined(DEBUG) |
229 if (FLAG_heap_stats) { | 300 if (FLAG_heap_stats) { |
230 new_space_.CollectStatistics(); | 301 heap_data.new_space_.CollectStatistics(); |
231 ReportHeapStatistics("Before GC"); | 302 ReportHeapStatistics("Before GC"); |
232 new_space_.ClearHistograms(); | 303 heap_data.new_space_.ClearHistograms(); |
233 } | 304 } |
234 #elif defined(ENABLE_LOGGING_AND_PROFILING) | 305 #elif defined(ENABLE_LOGGING_AND_PROFILING) |
235 if (FLAG_log_gc) { | 306 if (FLAG_log_gc) { |
236 new_space_.CollectStatistics(); | 307 heap_data.new_space_.CollectStatistics(); |
237 new_space_.ReportStatistics(); | 308 heap_data.new_space_.ReportStatistics(); |
238 new_space_.ClearHistograms(); | 309 heap_data.new_space_.ClearHistograms(); |
239 } | 310 } |
240 #endif | 311 #endif |
241 } | 312 } |
242 | 313 |
243 | 314 |
244 #if defined(ENABLE_LOGGING_AND_PROFILING) | 315 #if defined(ENABLE_LOGGING_AND_PROFILING) |
245 void Heap::PrintShortHeapStatistics() { | 316 void Heap::PrintShortHeapStatistics() { |
246 if (!FLAG_trace_gc_verbose) return; | 317 if (!FLAG_trace_gc_verbose) return; |
| 318 HeapData& heap_data = v8_context()->heap_data_; |
247 PrintF("Memory allocator, used: %8d, available: %8d\n", | 319 PrintF("Memory allocator, used: %8d, available: %8d\n", |
248 MemoryAllocator::Size(), | 320 MemoryAllocator::Size(), |
249 MemoryAllocator::Available()); | 321 MemoryAllocator::Available()); |
250 PrintF("New space, used: %8d, available: %8d\n", | 322 PrintF("New space, used: %8d, available: %8d\n", |
251 Heap::new_space_.Size(), | 323 heap_data.new_space_.Size(), |
252 new_space_.Available()); | 324 heap_data.new_space_.Available()); |
253 PrintF("Old pointers, used: %8d, available: %8d, waste: %8d\n", | 325 PrintF("Old pointers, used: %8d, available: %8d, waste: %8d\n", |
254 old_pointer_space_->Size(), | 326 heap_data.old_pointer_space_->Size(), |
255 old_pointer_space_->Available(), | 327 heap_data.old_pointer_space_->Available(), |
256 old_pointer_space_->Waste()); | 328 heap_data.old_pointer_space_->Waste()); |
257 PrintF("Old data space, used: %8d, available: %8d, waste: %8d\n", | 329 PrintF("Old data space, used: %8d, available: %8d, waste: %8d\n", |
258 old_data_space_->Size(), | 330 heap_data.old_data_space_->Size(), |
259 old_data_space_->Available(), | 331 heap_data.old_data_space_->Available(), |
260 old_data_space_->Waste()); | 332 heap_data.old_data_space_->Waste()); |
261 PrintF("Code space, used: %8d, available: %8d, waste: %8d\n", | 333 PrintF("Code space, used: %8d, available: %8d, waste: %8d\n", |
262 code_space_->Size(), | 334 heap_data.code_space_->Size(), |
263 code_space_->Available(), | 335 heap_data.code_space_->Available(), |
264 code_space_->Waste()); | 336 heap_data.code_space_->Waste()); |
265 PrintF("Map space, used: %8d, available: %8d, waste: %8d\n", | 337 PrintF("Map space, used: %8d, available: %8d, waste: %8d\n", |
266 map_space_->Size(), | 338 heap_data.map_space_->Size(), |
267 map_space_->Available(), | 339 heap_data.map_space_->Available(), |
268 map_space_->Waste()); | 340 heap_data.map_space_->Waste()); |
269 PrintF("Cell space, used: %8d, available: %8d, waste: %8d\n", | 341 PrintF("Cell space, used: %8d, available: %8d, waste: %8d\n", |
270 cell_space_->Size(), | 342 heap_data.cell_space_->Size(), |
271 cell_space_->Available(), | 343 heap_data.cell_space_->Available(), |
272 cell_space_->Waste()); | 344 heap_data.cell_space_->Waste()); |
273 PrintF("Large object space, used: %8d, avaialble: %8d\n", | 345 PrintF("Large object space, used: %8d, avaialble: %8d\n", |
274 lo_space_->Size(), | 346 heap_data.lo_space_->Size(), |
275 lo_space_->Available()); | 347 heap_data.lo_space_->Available()); |
276 } | 348 } |
277 #endif | 349 #endif |
278 | 350 |
279 | 351 |
280 // TODO(1238405): Combine the infrastructure for --heap-stats and | 352 // TODO(1238405): Combine the infrastructure for --heap-stats and |
281 // --log-gc to avoid the complicated preprocessor and flag testing. | 353 // --log-gc to avoid the complicated preprocessor and flag testing. |
282 void Heap::ReportStatisticsAfterGC() { | 354 void Heap::ReportStatisticsAfterGC() { |
| 355 HeapData& heap_data = v8_context()->heap_data_; |
283 // Similar to the before GC, we use some complicated logic to ensure that | 356 // Similar to the before GC, we use some complicated logic to ensure that |
284 // NewSpace statistics are logged exactly once when --log-gc is turned on. | 357 // NewSpace statistics are logged exactly once when --log-gc is turned on. |
285 #if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING) | 358 #if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING) |
286 if (FLAG_heap_stats) { | 359 if (FLAG_heap_stats) { |
287 new_space_.CollectStatistics(); | 360 heap_data.new_space_.CollectStatistics(); |
288 ReportHeapStatistics("After GC"); | 361 ReportHeapStatistics("After GC"); |
289 } else if (FLAG_log_gc) { | 362 } else if (FLAG_log_gc) { |
290 new_space_.ReportStatistics(); | 363 heap_data.new_space_.ReportStatistics(); |
291 } | 364 } |
292 #elif defined(DEBUG) | 365 #elif defined(DEBUG) |
293 if (FLAG_heap_stats) ReportHeapStatistics("After GC"); | 366 if (FLAG_heap_stats) ReportHeapStatistics("After GC"); |
294 #elif defined(ENABLE_LOGGING_AND_PROFILING) | 367 #elif defined(ENABLE_LOGGING_AND_PROFILING) |
295 if (FLAG_log_gc) new_space_.ReportStatistics(); | 368 if (FLAG_log_gc) heap_data.new_space_.ReportStatistics(); |
296 #endif | 369 #endif |
297 } | 370 } |
298 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 371 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
299 | 372 |
300 | 373 |
301 void Heap::GarbageCollectionPrologue() { | 374 void Heap::GarbageCollectionPrologue() { |
302 TranscendentalCache::Clear(); | 375 TranscendentalCache::Clear(); |
303 gc_count_++; | 376 HeapData& heap_data = v8_context()->heap_data_; |
| 377 heap_data.gc_count_++; |
304 #ifdef DEBUG | 378 #ifdef DEBUG |
305 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC); | 379 ASSERT(heap_data.allocation_allowed_ && heap_data.gc_state_ == NOT_IN_GC); |
306 allow_allocation(false); | 380 allow_allocation(false); |
307 | 381 |
308 if (FLAG_verify_heap) { | 382 if (FLAG_verify_heap) { |
309 Verify(); | 383 Verify(); |
310 } | 384 } |
311 | 385 |
312 if (FLAG_gc_verbose) Print(); | 386 if (FLAG_gc_verbose) Print(); |
313 | 387 |
314 if (FLAG_print_rset) { | 388 if (FLAG_print_rset) { |
315 // Not all spaces have remembered set bits that we care about. | 389 // Not all spaces have remembered set bits that we care about. |
316 old_pointer_space_->PrintRSet(); | 390 heap_data.old_pointer_space_->PrintRSet(); |
317 map_space_->PrintRSet(); | 391 heap_data.map_space_->PrintRSet(); |
318 lo_space_->PrintRSet(); | 392 heap_data.lo_space_->PrintRSet(); |
319 } | 393 } |
320 #endif | 394 #endif |
321 | 395 |
322 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 396 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
323 ReportStatisticsBeforeGC(); | 397 ReportStatisticsBeforeGC(); |
324 #endif | 398 #endif |
325 } | 399 } |
326 | 400 |
327 int Heap::SizeOfObjects() { | 401 int Heap::SizeOfObjects() { |
328 int total = 0; | 402 int total = 0; |
(...skipping 12 matching lines...)
341 if (FLAG_verify_heap) { | 415 if (FLAG_verify_heap) { |
342 Verify(); | 416 Verify(); |
343 } | 417 } |
344 | 418 |
345 if (FLAG_print_global_handles) GlobalHandles::Print(); | 419 if (FLAG_print_global_handles) GlobalHandles::Print(); |
346 if (FLAG_print_handles) PrintHandles(); | 420 if (FLAG_print_handles) PrintHandles(); |
347 if (FLAG_gc_verbose) Print(); | 421 if (FLAG_gc_verbose) Print(); |
348 if (FLAG_code_stats) ReportCodeStatistics("After GC"); | 422 if (FLAG_code_stats) ReportCodeStatistics("After GC"); |
349 #endif | 423 #endif |
350 | 424 |
351 Counters::alive_after_last_gc.Set(SizeOfObjects()); | 425 COUNTER(alive_after_last_gc).Set(SizeOfObjects()); |
352 | 426 |
353 Counters::symbol_table_capacity.Set(symbol_table()->Capacity()); | 427 COUNTER(symbol_table_capacity).Set(symbol_table()->Capacity()); |
354 Counters::number_of_symbols.Set(symbol_table()->NumberOfElements()); | 428 COUNTER(number_of_symbols).Set(symbol_table()->NumberOfElements()); |
355 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 429 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
356 ReportStatisticsAfterGC(); | 430 ReportStatisticsAfterGC(); |
357 #endif | 431 #endif |
358 #ifdef ENABLE_DEBUGGER_SUPPORT | 432 #ifdef ENABLE_DEBUGGER_SUPPORT |
359 Debug::AfterGarbageCollection(); | 433 Debug::AfterGarbageCollection(); |
360 #endif | 434 #endif |
361 } | 435 } |
362 | 436 |
363 | 437 |
364 void Heap::CollectAllGarbage(bool force_compaction) { | 438 void Heap::CollectAllGarbage(bool force_compaction) { |
365 // Since we are ignoring the return value, the exact choice of space does | 439 // Since we are ignoring the return value, the exact choice of space does |
366 // not matter, so long as we do not specify NEW_SPACE, which would not | 440 // not matter, so long as we do not specify NEW_SPACE, which would not |
367 // cause a full GC. | 441 // cause a full GC. |
368 MarkCompactCollector::SetForceCompaction(force_compaction); | 442 MarkCompactCollector::SetForceCompaction(force_compaction); |
369 CollectGarbage(0, OLD_POINTER_SPACE); | 443 CollectGarbage(0, OLD_POINTER_SPACE); |
370 MarkCompactCollector::SetForceCompaction(false); | 444 MarkCompactCollector::SetForceCompaction(false); |
371 } | 445 } |
372 | 446 |
373 | 447 |
374 void Heap::CollectAllGarbageIfContextDisposed() { | 448 void Heap::CollectAllGarbageIfContextDisposed() { |
| 449 HeapData& heap_data = v8_context()->heap_data_; |
375 // If the garbage collector interface is exposed through the global | 450 // If the garbage collector interface is exposed through the global |
376 // gc() function, we avoid being clever about forcing GCs when | 451 // gc() function, we avoid being clever about forcing GCs when |
377 // contexts are disposed and leave it to the embedder to make | 452 // contexts are disposed and leave it to the embedder to make |
378 // informed decisions about when to force a collection. | 453 // informed decisions about when to force a collection. |
379 if (!FLAG_expose_gc && context_disposed_pending_) { | 454 if (!FLAG_expose_gc && heap_data.context_disposed_pending_) { |
380 HistogramTimerScope scope(&Counters::gc_context); | 455 HistogramTimerScope scope(&COUNTER(gc_context)); |
381 CollectAllGarbage(false); | 456 CollectAllGarbage(false); |
382 } | 457 } |
383 context_disposed_pending_ = false; | 458 heap_data.context_disposed_pending_ = false; |
384 } | 459 } |
385 | 460 |
386 | 461 |
387 void Heap::NotifyContextDisposed() { | 462 void Heap::NotifyContextDisposed() { |
388 context_disposed_pending_ = true; | 463 v8_context()->heap_data_.context_disposed_pending_ = true; |
389 } | 464 } |
390 | 465 |
391 | 466 |
392 bool Heap::CollectGarbage(int requested_size, AllocationSpace space) { | 467 bool Heap::CollectGarbage(int requested_size, AllocationSpace space) { |
393 // The VM is in the GC state until exiting this function. | 468 // The VM is in the GC state until exiting this function. |
394 VMState state(GC); | 469 VMState state(GC); |
395 | 470 |
| 471 HeapData& heap_data = v8_context()->heap_data_; |
| 472 |
396 #ifdef DEBUG | 473 #ifdef DEBUG |
397 // Reset the allocation timeout to the GC interval, but make sure to | 474 // Reset the allocation timeout to the GC interval, but make sure to |
398 // allow at least a few allocations after a collection. The reason | 475 // allow at least a few allocations after a collection. The reason |
399 // for this is that we have a lot of allocation sequences and we | 476 // for this is that we have a lot of allocation sequences and we |
400 // assume that a garbage collection will allow the subsequent | 477 // assume that a garbage collection will allow the subsequent |
401 // allocation attempts to go through. | 478 // allocation attempts to go through. |
402 allocation_timeout_ = Max(6, FLAG_gc_interval); | 479 heap_data.allocation_timeout_ = Max(6, FLAG_gc_interval); |
403 #endif | 480 #endif |
404 | 481 |
405 { GCTracer tracer; | 482 { GCTracer tracer; |
406 GarbageCollectionPrologue(); | 483 GarbageCollectionPrologue(); |
407 // The GC count was incremented in the prologue. Tell the tracer about | 484 // The GC count was incremented in the prologue. Tell the tracer about |
408 // it. | 485 // it. |
409 tracer.set_gc_count(gc_count_); | 486 tracer.set_gc_count(heap_data.gc_count_); |
410 | 487 |
411 GarbageCollector collector = SelectGarbageCollector(space); | 488 GarbageCollector collector = SelectGarbageCollector(space); |
412 // Tell the tracer which collector we've selected. | 489 // Tell the tracer which collector we've selected. |
413 tracer.set_collector(collector); | 490 tracer.set_collector(collector); |
414 | 491 |
415 HistogramTimer* rate = (collector == SCAVENGER) | 492 HistogramTimer* rate = (collector == SCAVENGER) |
416 ? &Counters::gc_scavenger | 493 ? &COUNTER(gc_scavenger) |
417 : &Counters::gc_compactor; | 494 : &COUNTER(gc_compactor); |
418 rate->Start(); | 495 rate->Start(); |
419 PerformGarbageCollection(space, collector, &tracer); | 496 PerformGarbageCollection(space, collector, &tracer); |
420 rate->Stop(); | 497 rate->Stop(); |
421 | 498 |
422 GarbageCollectionEpilogue(); | 499 GarbageCollectionEpilogue(); |
423 } | 500 } |
424 | 501 |
425 | 502 |
426 #ifdef ENABLE_LOGGING_AND_PROFILING | 503 #ifdef ENABLE_LOGGING_AND_PROFILING |
427 if (FLAG_log_gc) HeapProfiler::WriteSample(); | 504 if (FLAG_log_gc) HeapProfiler::WriteSample(); |
428 #endif | 505 #endif |
429 | 506 |
430 switch (space) { | 507 switch (space) { |
431 case NEW_SPACE: | 508 case NEW_SPACE: |
432 return new_space_.Available() >= requested_size; | 509 return heap_data.new_space_.Available() >= requested_size; |
433 case OLD_POINTER_SPACE: | 510 case OLD_POINTER_SPACE: |
434 return old_pointer_space_->Available() >= requested_size; | 511 return heap_data.old_pointer_space_->Available() >= requested_size; |
435 case OLD_DATA_SPACE: | 512 case OLD_DATA_SPACE: |
436 return old_data_space_->Available() >= requested_size; | 513 return heap_data.old_data_space_->Available() >= requested_size; |
437 case CODE_SPACE: | 514 case CODE_SPACE: |
438 return code_space_->Available() >= requested_size; | 515 return heap_data.code_space_->Available() >= requested_size; |
439 case MAP_SPACE: | 516 case MAP_SPACE: |
440 return map_space_->Available() >= requested_size; | 517 return heap_data.map_space_->Available() >= requested_size; |
441 case CELL_SPACE: | 518 case CELL_SPACE: |
442 return cell_space_->Available() >= requested_size; | 519 return heap_data.cell_space_->Available() >= requested_size; |
443 case LO_SPACE: | 520 case LO_SPACE: |
444 return lo_space_->Available() >= requested_size; | 521 return heap_data.lo_space_->Available() >= requested_size; |
445 } | 522 } |
446 return false; | 523 return false; |
447 } | 524 } |
448 | 525 |
449 | 526 |
450 void Heap::PerformScavenge() { | 527 void Heap::PerformScavenge() { |
451 GCTracer tracer; | 528 GCTracer tracer; |
452 PerformGarbageCollection(NEW_SPACE, SCAVENGER, &tracer); | 529 PerformGarbageCollection(NEW_SPACE, SCAVENGER, &tracer); |
453 } | 530 } |
454 | 531 |
(...skipping 18 matching lines...)
473 | 550 |
474 static void VerifySymbolTable() { | 551 static void VerifySymbolTable() { |
475 #ifdef DEBUG | 552 #ifdef DEBUG |
476 SymbolTableVerifier verifier; | 553 SymbolTableVerifier verifier; |
477 Heap::symbol_table()->IterateElements(&verifier); | 554 Heap::symbol_table()->IterateElements(&verifier); |
478 #endif // DEBUG | 555 #endif // DEBUG |
479 } | 556 } |
480 | 557 |
481 | 558 |
482 void Heap::EnsureFromSpaceIsCommitted() { | 559 void Heap::EnsureFromSpaceIsCommitted() { |
483 if (new_space_.CommitFromSpaceIfNeeded()) return; | 560 HeapData& heap_data = v8_context()->heap_data_; |
| 561 if (heap_data.new_space_.CommitFromSpaceIfNeeded()) return; |
484 | 562 |
485 // Committing memory to from space failed. | 563 // Committing memory to from space failed. |
486 // Try shrinking and try again. | 564 // Try shrinking and try again. |
487 Shrink(); | 565 Shrink(); |
488 if (new_space_.CommitFromSpaceIfNeeded()) return; | 566 if (heap_data.new_space_.CommitFromSpaceIfNeeded()) return; |
489 | 567 |
490 // Committing memory to from space failed again. | 568 // Committing memory to from space failed again. |
491 // Memory is exhausted and we will die. | 569 // Memory is exhausted and we will die. |
492 V8::FatalProcessOutOfMemory("Committing semi space failed."); | 570 V8::FatalProcessOutOfMemory("Committing semi space failed."); |
493 } | 571 } |
494 | 572 |
495 | 573 |
496 void Heap::PerformGarbageCollection(AllocationSpace space, | 574 void Heap::PerformGarbageCollection(AllocationSpace space, |
497 GarbageCollector collector, | 575 GarbageCollector collector, |
498 GCTracer* tracer) { | 576 GCTracer* tracer) { |
499 VerifySymbolTable(); | 577 VerifySymbolTable(); |
500 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) { | 578 HeapData& heap_data = v8_context()->heap_data_; |
501 ASSERT(!allocation_allowed_); | 579 if (collector == MARK_COMPACTOR && heap_data.global_gc_prologue_callback_) { |
502 global_gc_prologue_callback_(); | 580 ASSERT(!heap_data.allocation_allowed_); |
| 581 heap_data.global_gc_prologue_callback_(); |
503 } | 582 } |
504 EnsureFromSpaceIsCommitted(); | 583 EnsureFromSpaceIsCommitted(); |
505 if (collector == MARK_COMPACTOR) { | 584 if (collector == MARK_COMPACTOR) { |
506 MarkCompact(tracer); | 585 MarkCompact(tracer); |
507 | 586 |
508 int old_gen_size = PromotedSpaceSize(); | 587 int old_gen_size = PromotedSpaceSize(); |
509 old_gen_promotion_limit_ = | 588 heap_data.old_gen_promotion_limit_ = |
510 old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3); | 589 old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3); |
511 old_gen_allocation_limit_ = | 590 heap_data.old_gen_allocation_limit_ = |
512 old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2); | 591 old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2); |
513 old_gen_exhausted_ = false; | 592 heap_data.old_gen_exhausted_ = false; |
514 } | 593 } |
515 Scavenge(); | 594 Scavenge(); |
516 | 595 |
517 Counters::objs_since_last_young.Set(0); | 596 COUNTER(objs_since_last_young).Set(0); |
518 | 597 |
519 if (collector == MARK_COMPACTOR) { | 598 if (collector == MARK_COMPACTOR) { |
520 DisableAssertNoAllocation allow_allocation; | 599 DisableAssertNoAllocation allow_allocation; |
521 GlobalHandles::PostGarbageCollectionProcessing(); | 600 GlobalHandles::PostGarbageCollectionProcessing(); |
522 } | 601 } |
523 | 602 |
524 // Update relocatables. | 603 // Update relocatables. |
525 Relocatable::PostGarbageCollectionProcessing(); | 604 Relocatable::PostGarbageCollectionProcessing(); |
526 | 605 |
527 if (collector == MARK_COMPACTOR) { | 606 if (collector == MARK_COMPACTOR) { |
528 // Register the amount of external allocated memory. | 607 // Register the amount of external allocated memory. |
529 amount_of_external_allocated_memory_at_last_global_gc_ = | 608 heap_data.amount_of_external_allocated_memory_at_last_global_gc_ = |
530 amount_of_external_allocated_memory_; | 609 heap_data.amount_of_external_allocated_memory_; |
531 } | 610 } |
532 | 611 |
533 if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) { | 612 if (collector == MARK_COMPACTOR && heap_data.global_gc_epilogue_callback_) { |
534 ASSERT(!allocation_allowed_); | 613 ASSERT(!heap_data.allocation_allowed_); |
535 global_gc_epilogue_callback_(); | 614 heap_data.global_gc_epilogue_callback_(); |
536 } | 615 } |
537 VerifySymbolTable(); | 616 VerifySymbolTable(); |
538 } | 617 } |
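
To make the limit arithmetic above concrete (the 30 MB figure is an arbitrary example): after a mark-compact that leaves old_gen_size at 30 MB, the next promotion limit is 30 MB + max(2 MB, 30/3 MB) = 40 MB and the next allocation limit is 30 MB + max(8 MB, 30/2 MB) = 45 MB; the kMinimum* floors only dominate when the old generation is smaller than 6 MB and 16 MB respectively.
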
539 | 618 |
540 | 619 |
541 void Heap::MarkCompact(GCTracer* tracer) { | 620 void Heap::MarkCompact(GCTracer* tracer) { |
542 gc_state_ = MARK_COMPACT; | 621 HeapData& heap_data = v8_context()->heap_data_; |
543 mc_count_++; | 622 heap_data.gc_state_ = MARK_COMPACT; |
544 tracer->set_full_gc_count(mc_count_); | 623 heap_data.mc_count_++; |
| 624 tracer->set_full_gc_count(heap_data.mc_count_); |
545 LOG(ResourceEvent("markcompact", "begin")); | 625 LOG(ResourceEvent("markcompact", "begin")); |
546 | 626 |
547 MarkCompactCollector::Prepare(tracer); | 627 MarkCompactCollector::Prepare(tracer); |
548 | 628 |
549 bool is_compacting = MarkCompactCollector::IsCompacting(); | 629 bool is_compacting = MarkCompactCollector::IsCompacting(); |
550 | 630 |
551 MarkCompactPrologue(is_compacting); | 631 MarkCompactPrologue(is_compacting); |
552 | 632 |
553 MarkCompactCollector::CollectGarbage(); | 633 MarkCompactCollector::CollectGarbage(); |
554 | 634 |
555 MarkCompactEpilogue(is_compacting); | 635 MarkCompactEpilogue(is_compacting); |
556 | 636 |
557 LOG(ResourceEvent("markcompact", "end")); | 637 LOG(ResourceEvent("markcompact", "end")); |
558 | 638 |
559 gc_state_ = NOT_IN_GC; | 639 heap_data.gc_state_ = NOT_IN_GC; |
560 | 640 |
561 Shrink(); | 641 Shrink(); |
562 | 642 |
563 Counters::objs_since_last_full.Set(0); | 643 COUNTER(objs_since_last_full).Set(0); |
564 context_disposed_pending_ = false; | 644 heap_data.context_disposed_pending_ = false; |
565 } | 645 } |
566 | 646 |
567 | 647 |
568 void Heap::MarkCompactPrologue(bool is_compacting) { | 648 void Heap::MarkCompactPrologue(bool is_compacting) { |
569 // At any old GC clear the keyed lookup cache to enable collection of unused | 649 // At any old GC clear the keyed lookup cache to enable collection of unused |
570 // maps. | 650 // maps. |
571 KeyedLookupCache::Clear(); | 651 KeyedLookupCache::Clear(); |
572 ContextSlotCache::Clear(); | 652 ContextSlotCache::Clear(); |
573 DescriptorLookupCache::Clear(); | 653 DescriptorLookupCache::Clear(); |
574 | 654 |
575 CompilationCache::MarkCompactPrologue(); | 655 CompilationCache::MarkCompactPrologue(); |
576 | 656 |
577 Top::MarkCompactPrologue(is_compacting); | 657 Top::MarkCompactPrologue(is_compacting); |
578 ThreadManager::MarkCompactPrologue(is_compacting); | 658 ThreadManager::MarkCompactPrologue(is_compacting); |
579 } | 659 } |
580 | 660 |
581 | 661 |
582 void Heap::MarkCompactEpilogue(bool is_compacting) { | 662 void Heap::MarkCompactEpilogue(bool is_compacting) { |
583 Top::MarkCompactEpilogue(is_compacting); | 663 Top::MarkCompactEpilogue(is_compacting); |
584 ThreadManager::MarkCompactEpilogue(is_compacting); | 664 ThreadManager::MarkCompactEpilogue(is_compacting); |
585 } | 665 } |
586 | 666 |
587 | 667 |
588 Object* Heap::FindCodeObject(Address a) { | 668 Object* Heap::FindCodeObject(Address a) { |
589 Object* obj = code_space_->FindObject(a); | 669 HeapData& heap_data = v8_context()->heap_data_; |
| 670 Object* obj = heap_data.code_space_->FindObject(a); |
590 if (obj->IsFailure()) { | 671 if (obj->IsFailure()) { |
591 obj = lo_space_->FindObject(a); | 672 obj = heap_data.lo_space_->FindObject(a); |
592 } | 673 } |
593 ASSERT(!obj->IsFailure()); | 674 ASSERT(!obj->IsFailure()); |
594 return obj; | 675 return obj; |
595 } | 676 } |
596 | 677 |
597 | 678 |
598 // Helper class for copying HeapObjects | 679 // Helper class for copying HeapObjects |
599 class ScavengeVisitor: public ObjectVisitor { | 680 class ScavengeVisitor: public ObjectVisitor { |
600 public: | 681 public: |
601 | 682 |
602 void VisitPointer(Object** p) { ScavengePointer(p); } | 683 void VisitPointer(Object** p) { ScavengePointer(p); } |
603 | 684 |
604 void VisitPointers(Object** start, Object** end) { | 685 void VisitPointers(Object** start, Object** end) { |
605 // Copy all HeapObject pointers in [start, end) | 686 // Copy all HeapObject pointers in [start, end) |
606 for (Object** p = start; p < end; p++) ScavengePointer(p); | 687 for (Object** p = start; p < end; p++) ScavengePointer(p); |
607 } | 688 } |
608 | 689 |
609 private: | 690 private: |
610 void ScavengePointer(Object** p) { | 691 void ScavengePointer(Object** p) { |
611 Object* object = *p; | 692 Object* object = *p; |
612 if (!Heap::InNewSpace(object)) return; | 693 if (!Heap::InNewSpace(object)) return; |
613 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p), | 694 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p), |
614 reinterpret_cast<HeapObject*>(object)); | 695 reinterpret_cast<HeapObject*>(object)); |
615 } | 696 } |
616 }; | 697 }; |
617 | 698 |
618 | |
619 // A queue of pointers and maps of to-be-promoted objects during a | |
620 // scavenge collection. | |
621 class PromotionQueue { | |
622 public: | |
623 void Initialize(Address start_address) { | |
624 front_ = rear_ = reinterpret_cast<HeapObject**>(start_address); | |
625 } | |
626 | |
627 bool is_empty() { return front_ <= rear_; } | |
628 | |
629 void insert(HeapObject* object, Map* map) { | |
630 *(--rear_) = object; | |
631 *(--rear_) = map; | |
632 // Assert no overflow into live objects. | |
633 ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top()); | |
634 } | |
635 | |
636 void remove(HeapObject** object, Map** map) { | |
637 *object = *(--front_); | |
638 *map = Map::cast(*(--front_)); | |
639 // Assert no underflow. | |
640 ASSERT(front_ >= rear_); | |
641 } | |
642 | |
643 private: | |
644 // The front of the queue is higher in memory than the rear. | |
645 HeapObject** front_; | |
646 HeapObject** rear_; | |
647 }; | |
648 | |
649 | |
650 // Shared state read by the scavenge collector and set by ScavengeObject. | |
651 static PromotionQueue promotion_queue; | |
652 | |
653 | |
654 #ifdef DEBUG | 699 #ifdef DEBUG |
655 // Visitor class to verify pointers in code or data space do not point into | 700 // Visitor class to verify pointers in code or data space do not point into |
656 // new space. | 701 // new space. |
657 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor { | 702 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor { |
658 public: | 703 public: |
659 void VisitPointers(Object** start, Object**end) { | 704 void VisitPointers(Object** start, Object**end) { |
660 for (Object** current = start; current < end; current++) { | 705 for (Object** current = start; current < end; current++) { |
661 if ((*current)->IsHeapObject()) { | 706 if ((*current)->IsHeapObject()) { |
662 ASSERT(!Heap::InNewSpace(HeapObject::cast(*current))); | 707 ASSERT(!Heap::InNewSpace(HeapObject::cast(*current))); |
663 } | 708 } |
(...skipping 16 matching lines...)
680 while (data_it.has_next()) data_it.next()->Iterate(&v); | 725 while (data_it.has_next()) data_it.next()->Iterate(&v); |
681 } | 726 } |
682 #endif | 727 #endif |
683 | 728 |
684 | 729 |
685 void Heap::Scavenge() { | 730 void Heap::Scavenge() { |
686 #ifdef DEBUG | 731 #ifdef DEBUG |
687 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); | 732 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); |
688 #endif | 733 #endif |
689 | 734 |
690 gc_state_ = SCAVENGE; | 735 HeapData& heap_data = v8_context()->heap_data_; |
| 736 heap_data.gc_state_ = SCAVENGE; |
691 | 737 |
692 // Implements Cheney's copying algorithm | 738 // Implements Cheney's copying algorithm |
693 LOG(ResourceEvent("scavenge", "begin")); | 739 LOG(ResourceEvent("scavenge", "begin")); |
694 | 740 |
695 // Clear descriptor cache. | 741 // Clear descriptor cache. |
696 DescriptorLookupCache::Clear(); | 742 DescriptorLookupCache::Clear(); |
697 | 743 |
698 // Used for updating survived_since_last_expansion_ at function end. | 744 // Used for updating survived_since_last_expansion_ at function end. |
699 int survived_watermark = PromotedSpaceSize(); | 745 int survived_watermark = PromotedSpaceSize(); |
700 | 746 |
701 if (new_space_.Capacity() < new_space_.MaximumCapacity() && | 747 if (heap_data.new_space_.Capacity() < |
702 survived_since_last_expansion_ > new_space_.Capacity()) { | 748 heap_data.new_space_.MaximumCapacity() && |
| 749 heap_data.survived_since_last_expansion_ > |
| 750 heap_data.new_space_.Capacity() |
| 751 ) { |
703 // Grow the size of new space if there is room to grow and enough | 752 // Grow the size of new space if there is room to grow and enough |
704 // data has survived scavenge since the last expansion. | 753 // data has survived scavenge since the last expansion. |
705 new_space_.Grow(); | 754 heap_data.new_space_.Grow(); |
706 survived_since_last_expansion_ = 0; | 755 heap_data.survived_since_last_expansion_ = 0; |
707 } | 756 } |
708 | 757 |
709 // Flip the semispaces. After flipping, to space is empty, from space has | 758 // Flip the semispaces. After flipping, to space is empty, from space has |
710 // live objects. | 759 // live objects. |
711 new_space_.Flip(); | 760 heap_data.new_space_.Flip(); |
712 new_space_.ResetAllocationInfo(); | 761 heap_data.new_space_.ResetAllocationInfo(); |
713 | 762 |
714 // We need to sweep newly copied objects which can be either in the | 763 // We need to sweep newly copied objects which can be either in the |
715 // to space or promoted to the old generation. For to-space | 764 // to space or promoted to the old generation. For to-space |
716 // objects, we treat the bottom of the to space as a queue. Newly | 765 // objects, we treat the bottom of the to space as a queue. Newly |
717 // copied and unswept objects lie between a 'front' mark and the | 766 // copied and unswept objects lie between a 'front' mark and the |
718 // allocation pointer. | 767 // allocation pointer. |
719 // | 768 // |
720 // Promoted objects can go into various old-generation spaces, and | 769 // Promoted objects can go into various old-generation spaces, and |
721 // can be allocated internally in the spaces (from the free list). | 770 // can be allocated internally in the spaces (from the free list). |
722 // We treat the top of the to space as a queue of addresses of | 771 // We treat the top of the to space as a queue of addresses of |
723 // promoted objects. The addresses of newly promoted and unswept | 772 // promoted objects. The addresses of newly promoted and unswept |
724 // objects lie between a 'front' mark and a 'rear' mark that is | 773 // objects lie between a 'front' mark and a 'rear' mark that is |
725 // updated as a side effect of promoting an object. | 774 // updated as a side effect of promoting an object. |
726 // | 775 // |
727 // There is guaranteed to be enough room at the top of the to space | 776 // There is guaranteed to be enough room at the top of the to space |
728 // for the addresses of promoted objects: every object promoted | 777 // for the addresses of promoted objects: every object promoted |
729 // frees up its size in bytes from the top of the new space, and | 778 // frees up its size in bytes from the top of the new space, and |
730 // objects are at least one pointer in size. | 779 // objects are at least one pointer in size. |
731 Address new_space_front = new_space_.ToSpaceLow(); | 780 Address new_space_front = heap_data.new_space_.ToSpaceLow(); |
732 promotion_queue.Initialize(new_space_.ToSpaceHigh()); | 781 PromotionQueue& promotion_queue = |
| 782 heap_data.heap_private_data_.promotion_queue_; |
| 783 promotion_queue.Initialize(heap_data.new_space_.ToSpaceHigh()); |
733 | 784 |
734 ScavengeVisitor scavenge_visitor; | 785 ScavengeVisitor scavenge_visitor; |
735 // Copy roots. | 786 // Copy roots. |
736 IterateRoots(&scavenge_visitor, VISIT_ALL); | 787 IterateRoots(&scavenge_visitor, VISIT_ALL); |
737 | 788 |
738 // Copy objects reachable from the old generation. By definition, | 789 // Copy objects reachable from the old generation. By definition, |
739 // there are no intergenerational pointers in code or data spaces. | 790 // there are no intergenerational pointers in code or data spaces. |
740 IterateRSet(old_pointer_space_, &ScavengePointer); | 791 IterateRSet(heap_data.old_pointer_space_, &ScavengePointer); |
741 IterateRSet(map_space_, &ScavengePointer); | 792 IterateRSet(heap_data.map_space_, &ScavengePointer); |
742 lo_space_->IterateRSet(&ScavengePointer); | 793 heap_data.lo_space_->IterateRSet(&ScavengePointer); |
743 | 794 |
744 // Copy objects reachable from cells by scavenging cell values directly. | 795 // Copy objects reachable from cells by scavenging cell values directly. |
745 HeapObjectIterator cell_iterator(cell_space_); | 796 HeapObjectIterator cell_iterator(heap_data.cell_space_); |
746 while (cell_iterator.has_next()) { | 797 while (cell_iterator.has_next()) { |
747 HeapObject* cell = cell_iterator.next(); | 798 HeapObject* cell = cell_iterator.next(); |
748 if (cell->IsJSGlobalPropertyCell()) { | 799 if (cell->IsJSGlobalPropertyCell()) { |
749 Address value_address = | 800 Address value_address = |
750 reinterpret_cast<Address>(cell) + | 801 reinterpret_cast<Address>(cell) + |
751 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); | 802 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); |
752 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); | 803 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); |
753 } | 804 } |
754 } | 805 } |
755 | 806 |
756 do { | 807 do { |
757 ASSERT(new_space_front <= new_space_.top()); | 808 ASSERT(new_space_front <= heap_data.new_space_.top()); |
758 | 809 |
759 // The addresses new_space_front and new_space_.top() define a | 810 // The addresses new_space_front and new_space_.top() define a |
760 // queue of unprocessed copied objects. Process them until the | 811 // queue of unprocessed copied objects. Process them until the |
761 // queue is empty. | 812 // queue is empty. |
762 while (new_space_front < new_space_.top()) { | 813 while (new_space_front < heap_data.new_space_.top()) { |
763 HeapObject* object = HeapObject::FromAddress(new_space_front); | 814 HeapObject* object = HeapObject::FromAddress(new_space_front); |
764 object->Iterate(&scavenge_visitor); | 815 object->Iterate(&scavenge_visitor); |
765 new_space_front += object->Size(); | 816 new_space_front += object->Size(); |
766 } | 817 } |
767 | 818 |
768 // Promote and process all the to-be-promoted objects. | 819 // Promote and process all the to-be-promoted objects. |
769 while (!promotion_queue.is_empty()) { | 820 while (!promotion_queue.is_empty()) { |
770 HeapObject* source; | 821 HeapObject* source; |
771 Map* map; | 822 Map* map; |
772 promotion_queue.remove(&source, &map); | 823 promotion_queue.remove(&source, &map); |
773 // Copy the from-space object to its new location (given by the | 824 // Copy the from-space object to its new location (given by the |
774 // forwarding address) and fix its map. | 825 // forwarding address) and fix its map. |
775 HeapObject* target = source->map_word().ToForwardingAddress(); | 826 HeapObject* target = source->map_word().ToForwardingAddress(); |
776 CopyBlock(reinterpret_cast<Object**>(target->address()), | 827 CopyBlock(reinterpret_cast<Object**>(target->address()), |
777 reinterpret_cast<Object**>(source->address()), | 828 reinterpret_cast<Object**>(source->address()), |
778 source->SizeFromMap(map)); | 829 source->SizeFromMap(map)); |
779 target->set_map(map); | 830 target->set_map(map); |
780 | 831 |
781 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 832 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
782 // Update NewSpace stats if necessary. | 833 // Update NewSpace stats if necessary. |
783 RecordCopiedObject(target); | 834 RecordCopiedObject(target); |
784 #endif | 835 #endif |
785 // Visit the newly copied object for pointers to new space. | 836 // Visit the newly copied object for pointers to new space. |
786 target->Iterate(&scavenge_visitor); | 837 target->Iterate(&scavenge_visitor); |
787 UpdateRSet(target); | 838 UpdateRSet(target); |
788 } | 839 } |
789 | 840 |
790 // Take another spin if there are now unswept objects in new space | 841 // Take another spin if there are now unswept objects in new space |
791 // (there are currently no more unswept promoted objects). | 842 // (there are currently no more unswept promoted objects). |
792 } while (new_space_front < new_space_.top()); | 843 } while (new_space_front < heap_data.new_space_.top()); |
793 | 844 |
794 // Set age mark. | 845 // Set age mark. |
795 new_space_.set_age_mark(new_space_.top()); | 846 heap_data.new_space_.set_age_mark(heap_data.new_space_.top()); |
796 | 847 |
797 // Update how much has survived scavenge. | 848 // Update how much has survived scavenge. |
798 survived_since_last_expansion_ += | 849 heap_data.survived_since_last_expansion_ += |
799 (PromotedSpaceSize() - survived_watermark) + new_space_.Size(); | 850 (PromotedSpaceSize() - survived_watermark) + heap_data.new_space_.Size(); |
800 | 851 |
801 LOG(ResourceEvent("scavenge", "end")); | 852 LOG(ResourceEvent("scavenge", "end")); |
802 | 853 |
803 gc_state_ = NOT_IN_GC; | 854 heap_data.gc_state_ = NOT_IN_GC; |
804 } | 855 } |
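
For reference, the scan loop above is classic Cheney: new_space_front is the scan pointer chasing the allocation top, and catching up to it means every copied object's children have themselves been copied. A self-contained sketch of that two-finger loop (plain structs stand in for heap objects; promotion to old spaces is omitted):

    #include <cstddef>
    #include <vector>

    struct Obj {
      std::vector<Obj*> children;
      Obj* forward;  // forwarding pointer once copied
      Obj() : forward(NULL) {}
    };

    static Obj* Copy(std::vector<Obj*>& to_space, Obj* o) {
      if (o->forward != NULL) return o->forward;  // already moved
      Obj* target = new Obj(*o);                  // shallow copy
      to_space.push_back(target);
      return o->forward = target;
    }

    static void CheneyScavenge(std::vector<Obj*>& roots) {
      std::vector<Obj*> to_space;
      for (size_t i = 0; i < roots.size(); ++i)
        roots[i] = Copy(to_space, roots[i]);
      // The scan index chases the allocation top, exactly as
      // new_space_front chases new_space_.top() in Heap::Scavenge().
      for (size_t scan = 0; scan < to_space.size(); ++scan) {
        std::vector<Obj*>& kids = to_space[scan]->children;
        for (size_t i = 0; i < kids.size(); ++i)
          kids[i] = Copy(to_space, kids[i]);
      }
    }
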
805 | 856 |
806 | 857 |
807 void Heap::ClearRSetRange(Address start, int size_in_bytes) { | 858 void Heap::ClearRSetRange(Address start, int size_in_bytes) { |
808 uint32_t start_bit; | 859 uint32_t start_bit; |
809 Address start_word_address = | 860 Address start_word_address = |
810 Page::ComputeRSetBitPosition(start, 0, &start_bit); | 861 Page::ComputeRSetBitPosition(start, 0, &start_bit); |
811 uint32_t end_bit; | 862 uint32_t end_bit; |
812 Address end_word_address = | 863 Address end_word_address = |
813 Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize, | 864 Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize, |
(...skipping 67 matching lines...)
881 // Skip code object, we know it does not contain inter-generational | 932 // Skip code object, we know it does not contain inter-generational |
882 // pointers. | 933 // pointers. |
883 UpdateRSetVisitor v; | 934 UpdateRSetVisitor v; |
884 obj->Iterate(&v); | 935 obj->Iterate(&v); |
885 } | 936 } |
886 return obj->Size(); | 937 return obj->Size(); |
887 } | 938 } |
888 | 939 |
889 | 940 |
890 void Heap::RebuildRSets() { | 941 void Heap::RebuildRSets() { |
| 942 HeapData& heap_data = v8_context()->heap_data_; |
891 // By definition, we do not care about remembered set bits in code, | 943 // By definition, we do not care about remembered set bits in code, |
892 // data, or cell spaces. | 944 // data, or cell spaces. |
893 map_space_->ClearRSet(); | 945 heap_data.map_space_->ClearRSet(); |
894 RebuildRSets(map_space_); | 946 RebuildRSets(heap_data.map_space_); |
895 | 947 |
896 old_pointer_space_->ClearRSet(); | 948 heap_data.old_pointer_space_->ClearRSet(); |
897 RebuildRSets(old_pointer_space_); | 949 RebuildRSets(heap_data.old_pointer_space_); |
898 | 950 |
899 Heap::lo_space_->ClearRSet(); | 951 heap_data.lo_space_->ClearRSet(); |
900 RebuildRSets(lo_space_); | 952 RebuildRSets(heap_data.lo_space_); |
901 } | 953 } |
902 | 954 |
903 | 955 |
904 void Heap::RebuildRSets(PagedSpace* space) { | 956 void Heap::RebuildRSets(PagedSpace* space) { |
905 HeapObjectIterator it(space); | 957 HeapObjectIterator it(space); |
906 while (it.has_next()) Heap::UpdateRSet(it.next()); | 958 while (it.has_next()) Heap::UpdateRSet(it.next()); |
907 } | 959 } |
908 | 960 |
909 | 961 |
910 void Heap::RebuildRSets(LargeObjectSpace* space) { | 962 void Heap::RebuildRSets(LargeObjectSpace* space) { |
911 LargeObjectIterator it(space); | 963 LargeObjectIterator it(space); |
912 while (it.has_next()) Heap::UpdateRSet(it.next()); | 964 while (it.has_next()) Heap::UpdateRSet(it.next()); |
913 } | 965 } |
914 | 966 |
915 | 967 |
916 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 968 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
917 void Heap::RecordCopiedObject(HeapObject* obj) { | 969 void Heap::RecordCopiedObject(HeapObject* obj) { |
918 bool should_record = false; | 970 bool should_record = false; |
919 #ifdef DEBUG | 971 #ifdef DEBUG |
920 should_record = FLAG_heap_stats; | 972 should_record = FLAG_heap_stats; |
921 #endif | 973 #endif |
922 #ifdef ENABLE_LOGGING_AND_PROFILING | 974 #ifdef ENABLE_LOGGING_AND_PROFILING |
923 should_record = should_record || FLAG_log_gc; | 975 should_record = should_record || FLAG_log_gc; |
924 #endif | 976 #endif |
925 if (should_record) { | 977 if (should_record) { |
926 if (new_space_.Contains(obj)) { | 978 HeapData& heap_data = v8_context()->heap_data_; |
927 new_space_.RecordAllocation(obj); | 979 if (heap_data.new_space_.Contains(obj)) { |
| 980 heap_data.new_space_.RecordAllocation(obj); |
928 } else { | 981 } else { |
929 new_space_.RecordPromotion(obj); | 982 heap_data.new_space_.RecordPromotion(obj); |
930 } | 983 } |
931 } | 984 } |
932 } | 985 } |
933 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 986 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
934 | 987 |
935 | 988 |
936 | 989 |
937 HeapObject* Heap::MigrateObject(HeapObject* source, | 990 HeapObject* Heap::MigrateObject(HeapObject* source, |
938 HeapObject* target, | 991 HeapObject* target, |
939 int size) { | 992 int size) { |
(...skipping 37 matching lines...) |
977 // active semispace of the young generation and not already copied. | 1030 // active semispace of the young generation and not already copied. |
978 if (!InNewSpace(object)) return; | 1031 if (!InNewSpace(object)) return; |
979 first_word = object->map_word(); | 1032 first_word = object->map_word(); |
980 if (first_word.IsForwardingAddress()) { | 1033 if (first_word.IsForwardingAddress()) { |
981 *p = first_word.ToForwardingAddress(); | 1034 *p = first_word.ToForwardingAddress(); |
982 return; | 1035 return; |
983 } | 1036 } |
984 } | 1037 } |
985 | 1038 |
986 int object_size = object->SizeFromMap(first_word.ToMap()); | 1039 int object_size = object->SizeFromMap(first_word.ToMap()); |
| 1040 HeapData& heap_data = v8_context()->heap_data_; |
987 // We rely on live objects in new space to be at least two pointers, | 1041 // We rely on live objects in new space to be at least two pointers, |
988 // so we can store the from-space address and map pointer of promoted | 1042 // so we can store the from-space address and map pointer of promoted |
989 // objects in the to space. | 1043 // objects in the to space. |
990 ASSERT(object_size >= 2 * kPointerSize); | 1044 ASSERT(object_size >= 2 * kPointerSize); |
991 | 1045 |
992 // If the object should be promoted, we try to copy it to old space. | 1046 // If the object should be promoted, we try to copy it to old space. |
993 if (ShouldBePromoted(object->address(), object_size)) { | 1047 if (ShouldBePromoted(object->address(), object_size)) { |
994 Object* result; | 1048 Object* result; |
| 1049 PromotionQueue& promotion_queue = |
| 1050 heap_data.heap_private_data_.promotion_queue_; |
| 1051 |
995 if (object_size > MaxObjectSizeInPagedSpace()) { | 1052 if (object_size > MaxObjectSizeInPagedSpace()) { |
996 result = lo_space_->AllocateRawFixedArray(object_size); | 1053 result = heap_data.lo_space_->AllocateRawFixedArray(object_size); |
997 if (!result->IsFailure()) { | 1054 if (!result->IsFailure()) { |
998 // Save the from-space object pointer and its map pointer at the | 1055 // Save the from-space object pointer and its map pointer at the |
999 // top of the to space to be swept and copied later. Write the | 1056 // top of the to space to be swept and copied later. Write the |
1000 // forwarding address over the map word of the from-space | 1057 // forwarding address over the map word of the from-space |
1001 // object. | 1058 // object. |
1002 HeapObject* target = HeapObject::cast(result); | 1059 HeapObject* target = HeapObject::cast(result); |
1003 promotion_queue.insert(object, first_word.ToMap()); | 1060 promotion_queue.insert(object, first_word.ToMap()); |
1004 object->set_map_word(MapWord::FromForwardingAddress(target)); | 1061 object->set_map_word(MapWord::FromForwardingAddress(target)); |
1005 | 1062 |
1006 // Give the space allocated for the result a proper map by | 1063 // Give the space allocated for the result a proper map by |
1007 // treating it as a free list node (not linked into the free | 1064 // treating it as a free list node (not linked into the free |
1008 // list). | 1065 // list). |
1009 FreeListNode* node = FreeListNode::FromAddress(target->address()); | 1066 FreeListNode* node = FreeListNode::FromAddress(target->address()); |
1010 node->set_size(object_size); | 1067 node->set_size(object_size); |
1011 | 1068 |
1012 *p = target; | 1069 *p = target; |
1013 return; | 1070 return; |
1014 } | 1071 } |
1015 } else { | 1072 } else { |
1016 OldSpace* target_space = Heap::TargetSpace(object); | 1073 OldSpace* target_space = Heap::TargetSpace(object); |
1017 ASSERT(target_space == Heap::old_pointer_space_ || | 1074 ASSERT(target_space == heap_data.old_pointer_space_ || |
1018 target_space == Heap::old_data_space_); | 1075 target_space == heap_data.old_data_space_); |
1019 result = target_space->AllocateRaw(object_size); | 1076 result = target_space->AllocateRaw(object_size); |
1020 if (!result->IsFailure()) { | 1077 if (!result->IsFailure()) { |
1021 HeapObject* target = HeapObject::cast(result); | 1078 HeapObject* target = HeapObject::cast(result); |
1022 if (target_space == Heap::old_pointer_space_) { | 1079 if (target_space == heap_data.old_pointer_space_) { |
1023 // Save the from-space object pointer and its map pointer at the | 1080 // Save the from-space object pointer and its map pointer at the |
1024 // top of the to space to be swept and copied later. Write the | 1081 // top of the to space to be swept and copied later. Write the |
1025 // forwarding address over the map word of the from-space | 1082 // forwarding address over the map word of the from-space |
1026 // object. | 1083 // object. |
1027 promotion_queue.insert(object, first_word.ToMap()); | 1084 promotion_queue.insert(object, first_word.ToMap()); |
1028 object->set_map_word(MapWord::FromForwardingAddress(target)); | 1085 object->set_map_word(MapWord::FromForwardingAddress(target)); |
1029 | 1086 |
1030 // Give the space allocated for the result a proper map by | 1087 // Give the space allocated for the result a proper map by |
1031 // treating it as a free list node (not linked into the free | 1088 // treating it as a free list node (not linked into the free |
1032 // list). | 1089 // list). |
(...skipping 10 matching lines...) |
1043 #ifdef DEBUG | 1100 #ifdef DEBUG |
1044 VerifyNonPointerSpacePointersVisitor v; | 1101 VerifyNonPointerSpacePointersVisitor v; |
1045 (*p)->Iterate(&v); | 1102 (*p)->Iterate(&v); |
1046 #endif | 1103 #endif |
1047 } | 1104 } |
1048 return; | 1105 return; |
1049 } | 1106 } |
1050 } | 1107 } |
1051 } | 1108 } |
1052 // The object should remain in new space or the old space allocation failed. | 1109 // The object should remain in new space or the old space allocation failed. |
1053 Object* result = new_space_.AllocateRaw(object_size); | 1110 Object* result = heap_data.new_space_.AllocateRaw(object_size); |
1054 // Failed allocation at this point is utterly unexpected. | 1111 // Failed allocation at this point is utterly unexpected. |
1055 ASSERT(!result->IsFailure()); | 1112 ASSERT(!result->IsFailure()); |
1056 *p = MigrateObject(object, HeapObject::cast(result), object_size); | 1113 *p = MigrateObject(object, HeapObject::cast(result), object_size); |
1057 } | 1114 } |
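
The promotion path above relies on a small protocol: the old-space copy has not been filled in yet, so the scavenger records the from-space pointer and its map in the promotion queue and overwrites the from-space map word with a forwarding address; later visits to the same object see the forwarding word and simply update their slot. A simplified, compilable sketch (the low-bit tag below is an assumption, not V8's actual MapWord encoding):

#include <cassert>
#include <cstdint>
#include <deque>
#include <utility>

struct Map;
struct HeapObject {
  uintptr_t map_word;  // either a Map* or a tagged forwarding address
};

const uintptr_t kForwardTag = 1;  // low bit marks "already moved"

bool IsForwarded(HeapObject* o) { return o->map_word & kForwardTag; }
HeapObject* ForwardingAddress(HeapObject* o) {
  return reinterpret_cast<HeapObject*>(o->map_word & ~kForwardTag);
}

std::deque<std::pair<HeapObject*, Map*>> promotion_queue;

// Queue the (object, map) pair and clobber the map word with a forwarder.
void Promote(HeapObject* from, HeapObject* to) {
  promotion_queue.emplace_back(from, reinterpret_cast<Map*>(from->map_word));
  from->map_word = reinterpret_cast<uintptr_t>(to) | kForwardTag;
}

void ScavengeSlot(HeapObject** slot) {
  if (IsForwarded(*slot)) *slot = ForwardingAddress(*slot);
  // otherwise: copy or promote the object, as in the real collector
}

int main() {
  HeapObject from{0}, to{0};
  from.map_word = reinterpret_cast<uintptr_t>(reinterpret_cast<Map*>(0x1000));
  Promote(&from, &to);
  HeapObject* slot = &from;
  ScavengeSlot(&slot);
  assert(slot == &to);  // the slot now points at the promoted copy
  return 0;
}
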
1058 | 1115 |
1059 | 1116 |
1060 void Heap::ScavengePointer(HeapObject** p) { | 1117 void Heap::ScavengePointer(HeapObject** p) { |
1061 ScavengeObject(p, *p); | 1118 ScavengeObject(p, *p); |
1062 } | 1119 } |
1063 | 1120 |
(...skipping 52 matching lines...) |
1116 | 1173 |
1117 const Heap::StructTable Heap::struct_table[] = { | 1174 const Heap::StructTable Heap::struct_table[] = { |
1118 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \ | 1175 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \ |
1119 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex }, | 1176 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex }, |
1120 STRUCT_LIST(STRUCT_TABLE_ELEMENT) | 1177 STRUCT_LIST(STRUCT_TABLE_ELEMENT) |
1121 #undef STRUCT_TABLE_ELEMENT | 1178 #undef STRUCT_TABLE_ELEMENT |
1122 }; | 1179 }; |
1123 | 1180 |
1124 | 1181 |
1125 bool Heap::CreateInitialMaps() { | 1182 bool Heap::CreateInitialMaps() { |
| 1183 HeapData& heap_data = v8_context()->heap_data_; |
1126 Object* obj = AllocatePartialMap(MAP_TYPE, Map::kSize); | 1184 Object* obj = AllocatePartialMap(MAP_TYPE, Map::kSize); |
1127 if (obj->IsFailure()) return false; | 1185 if (obj->IsFailure()) return false; |
1128 // Map::cast cannot be used due to uninitialized map field. | 1186 // Map::cast cannot be used due to uninitialized map field. |
1129 Map* new_meta_map = reinterpret_cast<Map*>(obj); | 1187 Map* new_meta_map = reinterpret_cast<Map*>(obj); |
1130 set_meta_map(new_meta_map); | 1188 set_meta_map(new_meta_map); |
1131 new_meta_map->set_map(new_meta_map); | 1189 new_meta_map->set_map(new_meta_map); |
1132 | 1190 |
1133 obj = AllocatePartialMap(FIXED_ARRAY_TYPE, FixedArray::kHeaderSize); | 1191 obj = AllocatePartialMap(FIXED_ARRAY_TYPE, FixedArray::kHeaderSize); |
1134 if (obj->IsFailure()) return false; | 1192 if (obj->IsFailure()) return false; |
1135 set_fixed_array_map(Map::cast(obj)); | 1193 set_fixed_array_map(Map::cast(obj)); |
(...skipping 41 matching lines...) |
1177 set_heap_number_map(Map::cast(obj)); | 1235 set_heap_number_map(Map::cast(obj)); |
1178 | 1236 |
1179 obj = AllocateMap(PROXY_TYPE, Proxy::kSize); | 1237 obj = AllocateMap(PROXY_TYPE, Proxy::kSize); |
1180 if (obj->IsFailure()) return false; | 1238 if (obj->IsFailure()) return false; |
1181 set_proxy_map(Map::cast(obj)); | 1239 set_proxy_map(Map::cast(obj)); |
1182 | 1240 |
1183 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) { | 1241 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) { |
1184 const StringTypeTable& entry = string_type_table[i]; | 1242 const StringTypeTable& entry = string_type_table[i]; |
1185 obj = AllocateMap(entry.type, entry.size); | 1243 obj = AllocateMap(entry.type, entry.size); |
1186 if (obj->IsFailure()) return false; | 1244 if (obj->IsFailure()) return false; |
1187 roots_[entry.index] = Map::cast(obj); | 1245 heap_data.roots_[entry.index] = Map::cast(obj); |
1188 } | 1246 } |
1189 | 1247 |
1190 obj = AllocateMap(STRING_TYPE, SeqTwoByteString::kAlignedSize); | 1248 obj = AllocateMap(STRING_TYPE, SeqTwoByteString::kAlignedSize); |
1191 if (obj->IsFailure()) return false; | 1249 if (obj->IsFailure()) return false; |
1192 set_undetectable_string_map(Map::cast(obj)); | 1250 set_undetectable_string_map(Map::cast(obj)); |
1193 Map::cast(obj)->set_is_undetectable(); | 1251 Map::cast(obj)->set_is_undetectable(); |
1194 | 1252 |
1195 obj = AllocateMap(ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize); | 1253 obj = AllocateMap(ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize); |
1196 if (obj->IsFailure()) return false; | 1254 if (obj->IsFailure()) return false; |
1197 set_undetectable_ascii_string_map(Map::cast(obj)); | 1255 set_undetectable_ascii_string_map(Map::cast(obj)); |
(...skipping 56 matching lines...) |
1254 set_one_pointer_filler_map(Map::cast(obj)); | 1312 set_one_pointer_filler_map(Map::cast(obj)); |
1255 | 1313 |
1256 obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize); | 1314 obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize); |
1257 if (obj->IsFailure()) return false; | 1315 if (obj->IsFailure()) return false; |
1258 set_two_pointer_filler_map(Map::cast(obj)); | 1316 set_two_pointer_filler_map(Map::cast(obj)); |
1259 | 1317 |
1260 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) { | 1318 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) { |
1261 const StructTable& entry = struct_table[i]; | 1319 const StructTable& entry = struct_table[i]; |
1262 obj = AllocateMap(entry.type, entry.size); | 1320 obj = AllocateMap(entry.type, entry.size); |
1263 if (obj->IsFailure()) return false; | 1321 if (obj->IsFailure()) return false; |
1264 roots_[entry.index] = Map::cast(obj); | 1322 heap_data.roots_[entry.index] = Map::cast(obj); |
1265 } | 1323 } |
1266 | 1324 |
1267 obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize); | 1325 obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize); |
1268 if (obj->IsFailure()) return false; | 1326 if (obj->IsFailure()) return false; |
1269 set_hash_table_map(Map::cast(obj)); | 1327 set_hash_table_map(Map::cast(obj)); |
1270 | 1328 |
1271 obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize); | 1329 obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize); |
1272 if (obj->IsFailure()) return false; | 1330 if (obj->IsFailure()) return false; |
1273 set_context_map(Map::cast(obj)); | 1331 set_context_map(Map::cast(obj)); |
1274 | 1332 |
(...skipping 36 matching lines...) |
1311 } | 1369 } |
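
CreateInitialMaps opens with a bootstrap knot: the map that describes all maps must itself have a map, so it is allocated with the map field unset (which is why `Map::cast` cannot be used there) and then pointed at itself. A tiny sketch of that step with a hypothetical Map struct:

#include <cassert>

struct Map {
  Map* map = nullptr;  // every heap object starts with a map pointer
};

int main() {
  Map meta_map;              // allocated with its map slot still unset
  meta_map.map = &meta_map;  // tie the knot: the map of maps is itself
  assert(meta_map.map->map == &meta_map);
  return 0;
}
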
1312 | 1370 |
1313 | 1371 |
1314 Object* Heap::AllocateHeapNumber(double value) { | 1372 Object* Heap::AllocateHeapNumber(double value) { |
1315 // Use the general version if we're forced to always allocate. | 1373 // Use the general version if we're forced to always allocate. |
1316 if (always_allocate()) return AllocateHeapNumber(value, TENURED); | 1374 if (always_allocate()) return AllocateHeapNumber(value, TENURED); |
1317 | 1375 |
1318 // This version of AllocateHeapNumber is optimized for | 1376 // This version of AllocateHeapNumber is optimized for |
1319 // allocation in new space. | 1377 // allocation in new space. |
1320 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize); | 1378 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize); |
1321 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC); | 1379 HeapData& heap_data = v8_context()->heap_data_; |
1322 Object* result = new_space_.AllocateRaw(HeapNumber::kSize); | 1380 ASSERT(heap_data.allocation_allowed_ && heap_data.gc_state_ == NOT_IN_GC); |
| 1381 Object* result = heap_data.new_space_.AllocateRaw(HeapNumber::kSize); |
1323 if (result->IsFailure()) return result; | 1382 if (result->IsFailure()) return result; |
1324 HeapObject::cast(result)->set_map(heap_number_map()); | 1383 HeapObject::cast(result)->set_map(heap_number_map()); |
1325 HeapNumber::cast(result)->set_value(value); | 1384 HeapNumber::cast(result)->set_value(value); |
1326 return result; | 1385 return result; |
1327 } | 1386 } |
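
The new-space fast path above is a raw bump-pointer allocation: one bounds check, one pointer increment, with the tenured path as fallback when the bump would cross the semispace limit. A sketch assuming illustrative `NewSpace`/`AllocateRaw` shapes, not V8's real ones:

#include <cstddef>
#include <cstdio>

class NewSpace {
 public:
  NewSpace(char* start, size_t capacity)
      : top_(start), limit_(start + capacity) {}
  void* AllocateRaw(size_t size) {
    if (top_ + size > limit_) return nullptr;  // caller takes the slow path
    void* result = top_;
    top_ += size;  // the whole fast path is one pointer bump
    return result;
  }
 private:
  char* top_;
  char* limit_;
};

int main() {
  static char buffer[64];
  NewSpace space(buffer, sizeof(buffer));
  void* a = space.AllocateRaw(16);
  void* b = space.AllocateRaw(16);  // lands immediately after `a`
  std::printf("a=%p b=%p\n", a, b);
  return 0;
}
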
1328 | 1387 |
1329 | 1388 |
1330 Object* Heap::AllocateJSGlobalPropertyCell(Object* value) { | 1389 Object* Heap::AllocateJSGlobalPropertyCell(Object* value) { |
1331 Object* result = AllocateRawCell(); | 1390 Object* result = AllocateRawCell(); |
1332 if (result->IsFailure()) return result; | 1391 if (result->IsFailure()) return result; |
(...skipping 82 matching lines...) |
1415 Heap::CreateJSEntryStub(); | 1474 Heap::CreateJSEntryStub(); |
1416 Heap::CreateJSConstructEntryStub(); | 1475 Heap::CreateJSConstructEntryStub(); |
1417 #if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP | 1476 #if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP |
1418 Heap::CreateRegExpCEntryStub(); | 1477 Heap::CreateRegExpCEntryStub(); |
1419 #endif | 1478 #endif |
1420 } | 1479 } |
1421 | 1480 |
1422 | 1481 |
1423 bool Heap::CreateInitialObjects() { | 1482 bool Heap::CreateInitialObjects() { |
1424 Object* obj; | 1483 Object* obj; |
| 1484 HeapData& heap_data = v8_context()->heap_data_; |
1425 | 1485 |
1426 // The -0 value must be set before NumberFromDouble works. | 1486 // The -0 value must be set before NumberFromDouble works. |
1427 obj = AllocateHeapNumber(-0.0, TENURED); | 1487 obj = AllocateHeapNumber(-0.0, TENURED); |
1428 if (obj->IsFailure()) return false; | 1488 if (obj->IsFailure()) return false; |
1429 set_minus_zero_value(obj); | 1489 set_minus_zero_value(obj); |
1430 ASSERT(signbit(minus_zero_value()->Number()) != 0); | 1490 ASSERT(signbit(minus_zero_value()->Number()) != 0); |
1431 | 1491 |
1432 obj = AllocateHeapNumber(OS::nan_value(), TENURED); | 1492 obj = AllocateHeapNumber(OS::nan_value(), TENURED); |
1433 if (obj->IsFailure()) return false; | 1493 if (obj->IsFailure()) return false; |
1434 set_nan_value(obj); | 1494 set_nan_value(obj); |
1435 | 1495 |
1436 obj = Allocate(oddball_map(), OLD_DATA_SPACE); | 1496 obj = Allocate(oddball_map(), OLD_DATA_SPACE); |
1437 if (obj->IsFailure()) return false; | 1497 if (obj->IsFailure()) return false; |
1438 set_undefined_value(obj); | 1498 set_undefined_value(obj); |
1439 ASSERT(!InNewSpace(undefined_value())); | 1499 ASSERT(!InNewSpace(undefined_value())); |
1440 | 1500 |
1441 // Allocate initial symbol table. | 1501 // Allocate initial symbol table. |
1442 obj = SymbolTable::Allocate(kInitialSymbolTableSize); | 1502 obj = SymbolTable::Allocate(kInitialSymbolTableSize); |
1443 if (obj->IsFailure()) return false; | 1503 if (obj->IsFailure()) return false; |
1444 // Don't use set_symbol_table() due to asserts. | 1504 // Don't use set_symbol_table() due to asserts. |
1445 roots_[kSymbolTableRootIndex] = obj; | 1505 heap_data.roots_[kSymbolTableRootIndex] = obj; |
1446 | 1506 |
1447 // Assign the print strings for oddballs after creating the symbol table. | 1507 // Assign the print strings for oddballs after creating the symbol table. |
1448 Object* symbol = LookupAsciiSymbol("undefined"); | 1508 Object* symbol = LookupAsciiSymbol("undefined"); |
1449 if (symbol->IsFailure()) return false; | 1509 if (symbol->IsFailure()) return false; |
1450 Oddball::cast(undefined_value())->set_to_string(String::cast(symbol)); | 1510 Oddball::cast(undefined_value())->set_to_string(String::cast(symbol)); |
1451 Oddball::cast(undefined_value())->set_to_number(nan_value()); | 1511 Oddball::cast(undefined_value())->set_to_number(nan_value()); |
1452 | 1512 |
1453 // Assign the print strings for oddballs after creating the symbol table. | 1513 // Assign the print strings for oddballs after creating the symbol table. |
1454 symbol = LookupAsciiSymbol("null"); | 1514 symbol = LookupAsciiSymbol("null"); |
1455 if (symbol->IsFailure()) return false; | 1515 if (symbol->IsFailure()) return false; |
(...skipping 26 matching lines...) |
1482 set_termination_exception(obj); | 1542 set_termination_exception(obj); |
1483 | 1543 |
1484 // Allocate the empty string. | 1544 // Allocate the empty string. |
1485 obj = AllocateRawAsciiString(0, TENURED); | 1545 obj = AllocateRawAsciiString(0, TENURED); |
1486 if (obj->IsFailure()) return false; | 1546 if (obj->IsFailure()) return false; |
1487 set_empty_string(String::cast(obj)); | 1547 set_empty_string(String::cast(obj)); |
1488 | 1548 |
1489 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) { | 1549 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) { |
1490 obj = LookupAsciiSymbol(constant_symbol_table[i].contents); | 1550 obj = LookupAsciiSymbol(constant_symbol_table[i].contents); |
1491 if (obj->IsFailure()) return false; | 1551 if (obj->IsFailure()) return false; |
1492 roots_[constant_symbol_table[i].index] = String::cast(obj); | 1552 heap_data.roots_[constant_symbol_table[i].index] = String::cast(obj); |
1493 } | 1553 } |
1494 | 1554 |
1495 // Allocate the hidden symbol which is used to identify the hidden properties | 1555 // Allocate the hidden symbol which is used to identify the hidden properties |
1496 // in JSObjects. The hash code has a special value so that it will not match | 1556 // in JSObjects. The hash code has a special value so that it will not match |
1497 // the empty string when searching for the property. It cannot be part of the | 1557 // the empty string when searching for the property. It cannot be part of the |
1498 // loop above because it needs to be allocated manually with the special | 1558 // loop above because it needs to be allocated manually with the special |
1499 // hash code in place. The hash code for the hidden_symbol is zero to ensure | 1559 // hash code in place. The hash code for the hidden_symbol is zero to ensure |
1500 // that it will always be at the first entry in property descriptors. | 1560 // that it will always be at the first entry in property descriptors. |
1501 obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask); | 1561 obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask); |
1502 if (obj->IsFailure()) return false; | 1562 if (obj->IsFailure()) return false; |
1503 hidden_symbol_ = String::cast(obj); | 1563 heap_data.hidden_symbol_ = String::cast(obj); |
1504 | 1564 |
1505 // Allocate the proxy for __proto__. | 1565 // Allocate the proxy for __proto__. |
1506 obj = AllocateProxy((Address) &Accessors::ObjectPrototype); | 1566 obj = AllocateProxy((Address) &Accessors::ObjectPrototype); |
1507 if (obj->IsFailure()) return false; | 1567 if (obj->IsFailure()) return false; |
1508 set_prototype_accessors(Proxy::cast(obj)); | 1568 set_prototype_accessors(Proxy::cast(obj)); |
1509 | 1569 |
1510 // Allocate the code_stubs dictionary. The initial size is set to avoid | 1570 // Allocate the code_stubs dictionary. The initial size is set to avoid |
1511 // expanding the dictionary during bootstrapping. | 1571 // expanding the dictionary during bootstrapping. |
1512 obj = NumberDictionary::Allocate(128); | 1572 obj = NumberDictionary::Allocate(128); |
1513 if (obj->IsFailure()) return false; | 1573 if (obj->IsFailure()) return false; |
(...skipping 140 matching lines...) |
1654 Object* result = AllocateStringFromAscii(CStrVector(str)); | 1714 Object* result = AllocateStringFromAscii(CStrVector(str)); |
1655 | 1715 |
1656 if (!result->IsFailure()) { | 1716 if (!result->IsFailure()) { |
1657 SetNumberStringCache(number, String::cast(result)); | 1717 SetNumberStringCache(number, String::cast(result)); |
1658 } | 1718 } |
1659 return result; | 1719 return result; |
1660 } | 1720 } |
1661 | 1721 |
1662 | 1722 |
1663 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) { | 1723 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) { |
1664 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]); | 1724 return Map::cast(v8_context()->heap_data_.roots_[ |
| 1725 RootIndexForExternalArrayType(array_type)]); |
1665 } | 1726 } |
1666 | 1727 |
1667 | 1728 |
1668 Heap::RootListIndex Heap::RootIndexForExternalArrayType( | 1729 Heap::RootListIndex Heap::RootIndexForExternalArrayType( |
1669 ExternalArrayType array_type) { | 1730 ExternalArrayType array_type) { |
1670 switch (array_type) { | 1731 switch (array_type) { |
1671 case kExternalByteArray: | 1732 case kExternalByteArray: |
1672 return kExternalByteArrayMapRootIndex; | 1733 return kExternalByteArrayMapRootIndex; |
1673 case kExternalUnsignedByteArray: | 1734 case kExternalUnsignedByteArray: |
1674 return kExternalUnsignedByteArrayMapRootIndex; | 1735 return kExternalUnsignedByteArrayMapRootIndex; |
(...skipping 384 matching lines...) |
2059 external_pointer); | 2120 external_pointer); |
2060 | 2121 |
2061 return result; | 2122 return result; |
2062 } | 2123 } |
2063 | 2124 |
2064 | 2125 |
2065 Object* Heap::CreateCode(const CodeDesc& desc, | 2126 Object* Heap::CreateCode(const CodeDesc& desc, |
2066 ZoneScopeInfo* sinfo, | 2127 ZoneScopeInfo* sinfo, |
2067 Code::Flags flags, | 2128 Code::Flags flags, |
2068 Handle<Object> self_reference) { | 2129 Handle<Object> self_reference) { |
| 2130 HeapData& heap_data = v8_context()->heap_data_; |
2069 // Compute size | 2131 // Compute size |
2070 int body_size = RoundUp(desc.instr_size + desc.reloc_size, kObjectAlignment); | 2132 int body_size = RoundUp(desc.instr_size + desc.reloc_size, kObjectAlignment); |
2071 int sinfo_size = 0; | 2133 int sinfo_size = 0; |
2072 if (sinfo != NULL) sinfo_size = sinfo->Serialize(NULL); | 2134 if (sinfo != NULL) sinfo_size = sinfo->Serialize(NULL); |
2073 int obj_size = Code::SizeFor(body_size, sinfo_size); | 2135 int obj_size = Code::SizeFor(body_size, sinfo_size); |
2074 ASSERT(IsAligned(obj_size, Code::kCodeAlignment)); | 2136 ASSERT(IsAligned(obj_size, Code::kCodeAlignment)); |
2075 Object* result; | 2137 Object* result; |
2076 if (obj_size > MaxObjectSizeInPagedSpace()) { | 2138 if (obj_size > MaxObjectSizeInPagedSpace()) { |
2077 result = lo_space_->AllocateRawCode(obj_size); | 2139 result = heap_data.lo_space_->AllocateRawCode(obj_size); |
2078 } else { | 2140 } else { |
2079 result = code_space_->AllocateRaw(obj_size); | 2141 result = heap_data.code_space_->AllocateRaw(obj_size); |
2080 } | 2142 } |
2081 | 2143 |
2082 if (result->IsFailure()) return result; | 2144 if (result->IsFailure()) return result; |
2083 | 2145 |
2084 // Initialize the object | 2146 // Initialize the object |
2085 HeapObject::cast(result)->set_map(code_map()); | 2147 HeapObject::cast(result)->set_map(code_map()); |
2086 Code* code = Code::cast(result); | 2148 Code* code = Code::cast(result); |
2087 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address())); | 2149 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address())); |
2088 code->set_instruction_size(desc.instr_size); | 2150 code->set_instruction_size(desc.instr_size); |
2089 code->set_relocation_size(desc.reloc_size); | 2151 code->set_relocation_size(desc.reloc_size); |
(...skipping 13 matching lines...) |
2103 if (sinfo != NULL) sinfo->Serialize(code); // write scope info | 2165 if (sinfo != NULL) sinfo->Serialize(code); // write scope info |
2104 | 2166 |
2105 #ifdef DEBUG | 2167 #ifdef DEBUG |
2106 code->Verify(); | 2168 code->Verify(); |
2107 #endif | 2169 #endif |
2108 return code; | 2170 return code; |
2109 } | 2171 } |
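
CreateCode sizes the object by rounding the instruction and relocation bytes up to object alignment, then routes anything too large for a regular page to the large-object space. A sketch of that decision; the constants below are placeholders, not V8's real values:

#include <cstdio>

const int kObjectAlignment = 8;            // must be a power of two
const int kMaxObjectSizeInPagedSpace = 8 * 1024;

int RoundUp(int value, int alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

const char* ChooseSpace(int instr_size, int reloc_size, int header_size) {
  int body_size = RoundUp(instr_size + reloc_size, kObjectAlignment);
  int obj_size = header_size + body_size;
  return obj_size > kMaxObjectSizeInPagedSpace ? "lo_space" : "code_space";
}

int main() {
  std::printf("%s\n", ChooseSpace(100, 20, 64));     // small stub
  std::printf("%s\n", ChooseSpace(16000, 512, 64));  // large builtin
  return 0;
}
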
2110 | 2172 |
2111 | 2173 |
2112 Object* Heap::CopyCode(Code* code) { | 2174 Object* Heap::CopyCode(Code* code) { |
| 2175 HeapData& heap_data = v8_context()->heap_data_; |
2113 // Allocate an object the same size as the code object. | 2176 // Allocate an object the same size as the code object. |
2114 int obj_size = code->Size(); | 2177 int obj_size = code->Size(); |
2115 Object* result; | 2178 Object* result; |
2116 if (obj_size > MaxObjectSizeInPagedSpace()) { | 2179 if (obj_size > MaxObjectSizeInPagedSpace()) { |
2117 result = lo_space_->AllocateRawCode(obj_size); | 2180 result = heap_data.lo_space_->AllocateRawCode(obj_size); |
2118 } else { | 2181 } else { |
2119 result = code_space_->AllocateRaw(obj_size); | 2182 result = heap_data.code_space_->AllocateRaw(obj_size); |
2120 } | 2183 } |
2121 | 2184 |
2122 if (result->IsFailure()) return result; | 2185 if (result->IsFailure()) return result; |
2123 | 2186 |
2124 // Copy code object. | 2187 // Copy code object. |
2125 Address old_addr = code->address(); | 2188 Address old_addr = code->address(); |
2126 Address new_addr = reinterpret_cast<HeapObject*>(result)->address(); | 2189 Address new_addr = reinterpret_cast<HeapObject*>(result)->address(); |
2127 CopyBlock(reinterpret_cast<Object**>(new_addr), | 2190 CopyBlock(reinterpret_cast<Object**>(new_addr), |
2128 reinterpret_cast<Object**>(old_addr), | 2191 reinterpret_cast<Object**>(old_addr), |
2129 obj_size); | 2192 obj_size); |
2130 // Relocate the copy. | 2193 // Relocate the copy. |
2131 Code* new_code = Code::cast(result); | 2194 Code* new_code = Code::cast(result); |
2132 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address())); | 2195 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address())); |
2133 new_code->Relocate(new_addr - old_addr); | 2196 new_code->Relocate(new_addr - old_addr); |
2134 return new_code; | 2197 return new_code; |
2135 } | 2198 } |
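
After the raw byte copy, CopyCode calls `Relocate(new_addr - old_addr)`: any absolute pointer the code object holds into its own body must be shifted by the same delta the object moved by. A sketch of the idea with a hypothetical one-pointer layout:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

struct CodeObject {
  uintptr_t entry_point;  // absolute pointer into this object's own body
  char body[32];
};

void Relocate(CodeObject* code, ptrdiff_t delta) {
  code->entry_point += delta;  // re-aim self-referential pointers
}

int main() {
  CodeObject original;
  original.entry_point = reinterpret_cast<uintptr_t>(original.body) + 4;

  CodeObject copy;
  std::memcpy(&copy, &original, sizeof(original));  // blind byte copy
  ptrdiff_t delta = reinterpret_cast<char*>(&copy) -
                    reinterpret_cast<char*>(&original);
  Relocate(&copy, delta);

  // The copy's entry point now targets the copy's body, not the original's.
  assert(copy.entry_point == reinterpret_cast<uintptr_t>(copy.body) + 4);
  return 0;
}
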
2136 | 2199 |
2137 | 2200 |
2138 Object* Heap::Allocate(Map* map, AllocationSpace space) { | 2201 Object* Heap::Allocate(Map* map, AllocationSpace space) { |
2139 ASSERT(gc_state_ == NOT_IN_GC); | 2202 ASSERT(v8_context()->heap_data_.gc_state_ == NOT_IN_GC); |
2140 ASSERT(map->instance_type() != MAP_TYPE); | 2203 ASSERT(map->instance_type() != MAP_TYPE); |
2141 Object* result = AllocateRaw(map->instance_size(), | 2204 Object* result = AllocateRaw(map->instance_size(), |
2142 space, | 2205 space, |
2143 TargetSpaceId(map->instance_type())); | 2206 TargetSpaceId(map->instance_type())); |
2144 if (result->IsFailure()) return result; | 2207 if (result->IsFailure()) return result; |
2145 HeapObject::cast(result)->set_map(map); | 2208 HeapObject::cast(result)->set_map(map); |
2146 #ifdef ENABLE_LOGGING_AND_PROFILING | 2209 #ifdef ENABLE_LOGGING_AND_PROFILING |
2147 ProducerHeapProfile::RecordJSObjectAllocation(result); | 2210 ProducerHeapProfile::RecordJSObjectAllocation(result); |
2148 #endif | 2211 #endif |
2149 return result; | 2212 return result; |
(...skipping 41 matching lines...) |
2191 return InitializeFunction(JSFunction::cast(result), shared, prototype); | 2254 return InitializeFunction(JSFunction::cast(result), shared, prototype); |
2192 } | 2255 } |
2193 | 2256 |
2194 | 2257 |
2195 Object* Heap::AllocateArgumentsObject(Object* callee, int length) { | 2258 Object* Heap::AllocateArgumentsObject(Object* callee, int length) { |
2196 // To get fast allocation and map sharing for arguments objects we | 2259 // To get fast allocation and map sharing for arguments objects we |
2197 // allocate them based on an arguments boilerplate. | 2260 // allocate them based on an arguments boilerplate. |
2198 | 2261 |
2199 // This calls Copy directly rather than using Heap::AllocateRaw so we | 2262 // This calls Copy directly rather than using Heap::AllocateRaw so we |
2200 // duplicate the check here. | 2263 // duplicate the check here. |
2201 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC); | 2264 HeapData& heap_data = v8_context()->heap_data_; |
| 2265 ASSERT(heap_data.allocation_allowed_ && heap_data.gc_state_ == NOT_IN_GC); |
2202 | 2266 |
2203 JSObject* boilerplate = | 2267 JSObject* boilerplate = |
2204 Top::context()->global_context()->arguments_boilerplate(); | 2268 Top::context()->global_context()->arguments_boilerplate(); |
2205 | 2269 |
2206 // Make the clone. | 2270 // Make the clone. |
2207 Map* map = boilerplate->map(); | 2271 Map* map = boilerplate->map(); |
2208 int object_size = map->instance_size(); | 2272 int object_size = map->instance_size(); |
2209 Object* result = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE); | 2273 Object* result = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE); |
2210 if (result->IsFailure()) return result; | 2274 if (result->IsFailure()) return result; |
2211 | 2275 |
(...skipping 224 matching lines...) |
2436 CopyBlock(reinterpret_cast<Object**>(clone_address), | 2500 CopyBlock(reinterpret_cast<Object**>(clone_address), |
2437 reinterpret_cast<Object**>(source->address()), | 2501 reinterpret_cast<Object**>(source->address()), |
2438 object_size); | 2502 object_size); |
2439 // Update write barrier for all fields that lie beyond the header. | 2503 // Update write barrier for all fields that lie beyond the header. |
2440 for (int offset = JSObject::kHeaderSize; | 2504 for (int offset = JSObject::kHeaderSize; |
2441 offset < object_size; | 2505 offset < object_size; |
2442 offset += kPointerSize) { | 2506 offset += kPointerSize) { |
2443 RecordWrite(clone_address, offset); | 2507 RecordWrite(clone_address, offset); |
2444 } | 2508 } |
2445 } else { | 2509 } else { |
2446 clone = new_space_.AllocateRaw(object_size); | 2510 clone = v8_context()->heap_data_.new_space_.AllocateRaw(object_size); |
2447 if (clone->IsFailure()) return clone; | 2511 if (clone->IsFailure()) return clone; |
2448 ASSERT(Heap::InNewSpace(clone)); | 2512 ASSERT(Heap::InNewSpace(clone)); |
2449 // Since we know the clone is allocated in new space, we can copy | 2513 // Since we know the clone is allocated in new space, we can copy |
2450 // the contents without worrying about updating the write barrier. | 2514 // the contents without worrying about updating the write barrier. |
2451 CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()), | 2515 CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()), |
2452 reinterpret_cast<Object**>(source->address()), | 2516 reinterpret_cast<Object**>(source->address()), |
2453 object_size); | 2517 object_size); |
2454 } | 2518 } |
2455 | 2519 |
2456 FixedArray* elements = FixedArray::cast(source->elements()); | 2520 FixedArray* elements = FixedArray::cast(source->elements()); |
(...skipping 186 matching lines...) |
2643 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; | 2707 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; |
2644 | 2708 |
2645 // New space can't cope with forced allocation. | 2709 // New space can't cope with forced allocation. |
2646 if (always_allocate()) space = OLD_DATA_SPACE; | 2710 if (always_allocate()) space = OLD_DATA_SPACE; |
2647 | 2711 |
2648 int size = SeqAsciiString::SizeFor(length); | 2712 int size = SeqAsciiString::SizeFor(length); |
2649 | 2713 |
2650 Object* result = Failure::OutOfMemoryException(); | 2714 Object* result = Failure::OutOfMemoryException(); |
2651 if (space == NEW_SPACE) { | 2715 if (space == NEW_SPACE) { |
2652 result = size <= kMaxObjectSizeInNewSpace | 2716 result = size <= kMaxObjectSizeInNewSpace |
2653 ? new_space_.AllocateRaw(size) | 2717 ? v8_context()->heap_data_.new_space_.AllocateRaw(size) |
2654 : lo_space_->AllocateRaw(size); | 2718 : v8_context()->heap_data_.lo_space_->AllocateRaw(size); |
2655 } else { | 2719 } else { |
2656 if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE; | 2720 if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE; |
2657 result = AllocateRaw(size, space, OLD_DATA_SPACE); | 2721 result = AllocateRaw(size, space, OLD_DATA_SPACE); |
2658 } | 2722 } |
2659 if (result->IsFailure()) return result; | 2723 if (result->IsFailure()) return result; |
2660 | 2724 |
2661 // Partially initialize the object. | 2725 // Partially initialize the object. |
2662 HeapObject::cast(result)->set_map(ascii_string_map()); | 2726 HeapObject::cast(result)->set_map(ascii_string_map()); |
2663 String::cast(result)->set_length(length); | 2727 String::cast(result)->set_length(length); |
2664 String::cast(result)->set_hash_field(String::kEmptyHashField); | 2728 String::cast(result)->set_hash_field(String::kEmptyHashField); |
2665 ASSERT_EQ(size, HeapObject::cast(result)->Size()); | 2729 ASSERT_EQ(size, HeapObject::cast(result)->Size()); |
2666 return result; | 2730 return result; |
2667 } | 2731 } |
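
This allocator and the two-byte variant just below share the same space-selection logic: tenured (or forced) allocations go straight to old data space, small young allocations to new space, and anything oversized to the large-object space. A sketch with illustrative thresholds, not V8's actual constants:

#include <cstdio>

enum Space { NEW_SPACE, OLD_DATA_SPACE, LO_SPACE };

const int kMaxObjectSizeInNewSpace = 4 * 1024;
const int kMaxObjectSizeInPagedSpace = 8 * 1024;

Space ChooseStringSpace(int size, bool tenured, bool always_allocate) {
  // New space can't cope with forced allocation.
  if (tenured || always_allocate) {
    return size > kMaxObjectSizeInPagedSpace ? LO_SPACE : OLD_DATA_SPACE;
  }
  return size <= kMaxObjectSizeInNewSpace ? NEW_SPACE : LO_SPACE;
}

int main() {
  std::printf("%d\n", ChooseStringSpace(100, false, false));     // NEW_SPACE
  std::printf("%d\n", ChooseStringSpace(100, true, false));      // OLD_DATA_SPACE
  std::printf("%d\n", ChooseStringSpace(100000, false, false));  // LO_SPACE
  return 0;
}
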
2668 | 2732 |
2669 | 2733 |
2670 Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) { | 2734 Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) { |
2671 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; | 2735 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; |
2672 | 2736 |
2673 // New space can't cope with forced allocation. | 2737 // New space can't cope with forced allocation. |
2674 if (always_allocate()) space = OLD_DATA_SPACE; | 2738 if (always_allocate()) space = OLD_DATA_SPACE; |
2675 | 2739 |
2676 int size = SeqTwoByteString::SizeFor(length); | 2740 int size = SeqTwoByteString::SizeFor(length); |
2677 | 2741 |
2678 Object* result = Failure::OutOfMemoryException(); | 2742 Object* result = Failure::OutOfMemoryException(); |
2679 if (space == NEW_SPACE) { | 2743 if (space == NEW_SPACE) { |
2680 result = size <= kMaxObjectSizeInNewSpace | 2744 result = size <= kMaxObjectSizeInNewSpace |
2681 ? new_space_.AllocateRaw(size) | 2745 ? v8_context()->heap_data_.new_space_.AllocateRaw(size) |
2682 : lo_space_->AllocateRaw(size); | 2746 : v8_context()->heap_data_.lo_space_->AllocateRaw(size); |
2683 } else { | 2747 } else { |
2684 if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE; | 2748 if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE; |
2685 result = AllocateRaw(size, space, OLD_DATA_SPACE); | 2749 result = AllocateRaw(size, space, OLD_DATA_SPACE); |
2686 } | 2750 } |
2687 if (result->IsFailure()) return result; | 2751 if (result->IsFailure()) return result; |
2688 | 2752 |
2689 // Partially initialize the object. | 2753 // Partially initialize the object. |
2690 HeapObject::cast(result)->set_map(string_map()); | 2754 HeapObject::cast(result)->set_map(string_map()); |
2691 String::cast(result)->set_length(length); | 2755 String::cast(result)->set_length(length); |
2692 String::cast(result)->set_hash_field(String::kEmptyHashField); | 2756 String::cast(result)->set_hash_field(String::kEmptyHashField); |
(...skipping 12 matching lines...) |
2705 return result; | 2769 return result; |
2706 } | 2770 } |
2707 | 2771 |
2708 | 2772 |
2709 Object* Heap::AllocateRawFixedArray(int length) { | 2773 Object* Heap::AllocateRawFixedArray(int length) { |
2710 // Use the general function if we're forced to always allocate. | 2774 // Use the general function if we're forced to always allocate. |
2711 if (always_allocate()) return AllocateFixedArray(length, TENURED); | 2775 if (always_allocate()) return AllocateFixedArray(length, TENURED); |
2712 // Allocate the raw data for a fixed array. | 2776 // Allocate the raw data for a fixed array. |
2713 int size = FixedArray::SizeFor(length); | 2777 int size = FixedArray::SizeFor(length); |
2714 return size <= kMaxObjectSizeInNewSpace | 2778 return size <= kMaxObjectSizeInNewSpace |
2715 ? new_space_.AllocateRaw(size) | 2779 ? v8_context()->heap_data_.new_space_.AllocateRaw(size) |
2716 : lo_space_->AllocateRawFixedArray(size); | 2780 : v8_context()->heap_data_.lo_space_->AllocateRawFixedArray(size); |
2717 } | 2781 } |
2718 | 2782 |
2719 | 2783 |
2720 Object* Heap::CopyFixedArray(FixedArray* src) { | 2784 Object* Heap::CopyFixedArray(FixedArray* src) { |
2721 int len = src->length(); | 2785 int len = src->length(); |
2722 Object* obj = AllocateRawFixedArray(len); | 2786 Object* obj = AllocateRawFixedArray(len); |
2723 if (obj->IsFailure()) return obj; | 2787 if (obj->IsFailure()) return obj; |
2724 if (Heap::InNewSpace(obj)) { | 2788 if (Heap::InNewSpace(obj)) { |
2725 HeapObject* dst = HeapObject::cast(obj); | 2789 HeapObject* dst = HeapObject::cast(obj); |
2726 CopyBlock(reinterpret_cast<Object**>(dst->address()), | 2790 CopyBlock(reinterpret_cast<Object**>(dst->address()), |
(...skipping 32 matching lines...) |
2759 | 2823 |
2760 Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) { | 2824 Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) { |
2761 ASSERT(empty_fixed_array()->IsFixedArray()); | 2825 ASSERT(empty_fixed_array()->IsFixedArray()); |
2762 if (length == 0) return empty_fixed_array(); | 2826 if (length == 0) return empty_fixed_array(); |
2763 | 2827 |
2764 // New space can't cope with forced allocation. | 2828 // New space can't cope with forced allocation. |
2765 if (always_allocate()) pretenure = TENURED; | 2829 if (always_allocate()) pretenure = TENURED; |
2766 | 2830 |
2767 int size = FixedArray::SizeFor(length); | 2831 int size = FixedArray::SizeFor(length); |
2768 Object* result = Failure::OutOfMemoryException(); | 2832 Object* result = Failure::OutOfMemoryException(); |
| 2833 HeapData& heap_data = v8_context()->heap_data_; |
2769 if (pretenure != TENURED) { | 2834 if (pretenure != TENURED) { |
2770 result = size <= kMaxObjectSizeInNewSpace | 2835 result = size <= kMaxObjectSizeInNewSpace |
2771 ? new_space_.AllocateRaw(size) | 2836 ? heap_data.new_space_.AllocateRaw(size) |
2772 : lo_space_->AllocateRawFixedArray(size); | 2837 : heap_data.lo_space_->AllocateRawFixedArray(size); |
2773 } | 2838 } |
2774 if (result->IsFailure()) { | 2839 if (result->IsFailure()) { |
2775 if (size > MaxObjectSizeInPagedSpace()) { | 2840 if (size > MaxObjectSizeInPagedSpace()) { |
2776 result = lo_space_->AllocateRawFixedArray(size); | 2841 result = heap_data.lo_space_->AllocateRawFixedArray(size); |
2777 } else { | 2842 } else { |
2778 AllocationSpace space = | 2843 AllocationSpace space = |
2779 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE; | 2844 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE; |
2780 result = AllocateRaw(size, space, OLD_POINTER_SPACE); | 2845 result = AllocateRaw(size, space, OLD_POINTER_SPACE); |
2781 } | 2846 } |
2782 if (result->IsFailure()) return result; | 2847 if (result->IsFailure()) return result; |
2783 } | 2848 } |
2784 // Initialize the object. | 2849 // Initialize the object. |
2785 reinterpret_cast<Array*>(result)->set_map(fixed_array_map()); | 2850 reinterpret_cast<Array*>(result)->set_map(fixed_array_map()); |
2786 FixedArray* array = FixedArray::cast(result); | 2851 FixedArray* array = FixedArray::cast(result); |
(...skipping 98 matching lines...) |
2885 if (result->IsFailure()) return result; | 2950 if (result->IsFailure()) return result; |
2886 Struct::cast(result)->InitializeBody(size); | 2951 Struct::cast(result)->InitializeBody(size); |
2887 return result; | 2952 return result; |
2888 } | 2953 } |
2889 | 2954 |
2890 | 2955 |
2891 bool Heap::IdleNotification() { | 2956 bool Heap::IdleNotification() { |
2892 static const int kIdlesBeforeScavenge = 4; | 2957 static const int kIdlesBeforeScavenge = 4; |
2893 static const int kIdlesBeforeMarkSweep = 7; | 2958 static const int kIdlesBeforeMarkSweep = 7; |
2894 static const int kIdlesBeforeMarkCompact = 8; | 2959 static const int kIdlesBeforeMarkCompact = 8; |
2895 static int number_idle_notifications = 0; | |
2896 static int last_gc_count = gc_count_; | |
2897 | 2960 |
| 2961 HeapData& heap_data = v8_context()->heap_data_; |
| 2962 HeapPrivateData& heap_private_data = heap_data.heap_private_data_; |
2898 bool finished = false; | 2963 bool finished = false; |
2899 | 2964 |
2900 if (last_gc_count == gc_count_) { | 2965 if (heap_private_data.last_gc_count_ == heap_data.gc_count_) { |
2901 number_idle_notifications++; | 2966 heap_private_data.number_idle_notifications_++; |
2902 } else { | 2967 } else { |
2903 number_idle_notifications = 0; | 2968 heap_private_data.number_idle_notifications_ = 0; |
2904 last_gc_count = gc_count_; | 2969 heap_private_data.last_gc_count_ = heap_data.gc_count_; |
2905 } | 2970 } |
2906 | 2971 |
2907 if (number_idle_notifications == kIdlesBeforeScavenge) { | 2972 if (heap_private_data.number_idle_notifications_ == kIdlesBeforeScavenge) { |
2908 CollectGarbage(0, NEW_SPACE); | 2973 CollectGarbage(0, NEW_SPACE); |
2909 new_space_.Shrink(); | 2974 heap_data.new_space_.Shrink(); |
2910 last_gc_count = gc_count_; | 2975 heap_private_data.last_gc_count_ = heap_data.gc_count_; |
2911 | 2976 |
2912 } else if (number_idle_notifications == kIdlesBeforeMarkSweep) { | 2977 } else if (heap_private_data.number_idle_notifications_ == |
| 2978 kIdlesBeforeMarkSweep) { |
2913 // Before doing the mark-sweep collections we clear the | 2979 // Before doing the mark-sweep collections we clear the |
2914 // compilation cache to avoid hanging on to source code and | 2980 // compilation cache to avoid hanging on to source code and |
2915 // generated code for cached functions. | 2981 // generated code for cached functions. |
2916 CompilationCache::Clear(); | 2982 CompilationCache::Clear(); |
2917 | 2983 |
2918 CollectAllGarbage(false); | 2984 CollectAllGarbage(false); |
2919 new_space_.Shrink(); | 2985 heap_data.new_space_.Shrink(); |
2920 last_gc_count = gc_count_; | 2986 heap_private_data.last_gc_count_ = heap_data.gc_count_; |
2921 | 2987 |
2922 } else if (number_idle_notifications == kIdlesBeforeMarkCompact) { | 2988 } else if (heap_private_data.number_idle_notifications_ == |
| 2989 kIdlesBeforeMarkCompact) { |
2923 CollectAllGarbage(true); | 2990 CollectAllGarbage(true); |
2924 new_space_.Shrink(); | 2991 heap_data.new_space_.Shrink(); |
2925 last_gc_count = gc_count_; | 2992 heap_private_data.last_gc_count_ = heap_data.gc_count_; |
2926 number_idle_notifications = 0; | 2993 heap_private_data.number_idle_notifications_ = 0; |
2927 finished = true; | 2994 finished = true; |
2928 } | 2995 } |
2929 | 2996 |
2930 // Uncommit unused memory in new space. | 2997 // Uncommit unused memory in new space. |
2931 Heap::UncommitFromSpace(); | 2998 Heap::UncommitFromSpace(); |
2932 return finished; | 2999 return finished; |
2933 } | 3000 } |
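
IdleNotification escalates: after four idle rounds with no intervening GC it scavenges and shrinks new space, after seven it clears the compilation cache and runs a mark-sweep, and after eight it runs a compacting collection and reports itself finished. A compilable sketch of that counter logic, with prints standing in for the real collections:

#include <cstdio>

class IdlePolicy {
 public:
  // Returns true once the heaviest collection has been performed.
  bool Notify(int gc_count) {
    if (last_gc_count_ == gc_count) {
      idles_++;                 // another idle round with no GC in between
    } else {
      idles_ = 0;               // some GC ran anyway; start counting afresh
      last_gc_count_ = gc_count;
    }
    if (idles_ == 4) {
      std::puts("scavenge + shrink new space");
    } else if (idles_ == 7) {
      std::puts("clear compilation cache + mark-sweep");
    } else if (idles_ == 8) {
      std::puts("mark-compact");
      idles_ = 0;
      return true;
    }
    return false;
  }
 private:
  int idles_ = 0;
  int last_gc_count_ = 0;
};

int main() {
  IdlePolicy policy;
  for (int i = 0; i < 10 && !policy.Notify(0); i++) {}
  return 0;
}
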
2934 | 3001 |
2935 | 3002 |
2936 #ifdef DEBUG | 3003 #ifdef DEBUG |
2937 | 3004 |
2938 void Heap::Print() { | 3005 void Heap::Print() { |
2939 if (!HasBeenSetup()) return; | 3006 if (!HasBeenSetup()) return; |
2940 Top::PrintStack(); | 3007 Top::PrintStack(); |
2941 AllSpaces spaces; | 3008 AllSpaces spaces; |
2942 while (Space* space = spaces.next()) space->Print(); | 3009 while (Space* space = spaces.next()) space->Print(); |
2943 } | 3010 } |
2944 | 3011 |
2945 | 3012 |
2946 void Heap::ReportCodeStatistics(const char* title) { | 3013 void Heap::ReportCodeStatistics(const char* title) { |
| 3014 HeapData& heap_data = v8_context()->heap_data_; |
2947 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title); | 3015 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title); |
2948 PagedSpace::ResetCodeStatistics(); | 3016 PagedSpace::ResetCodeStatistics(); |
2949 // We do not look for code in new space, map space, or old space. If code | 3017 // We do not look for code in new space, map space, or old space. If code |
2950 // somehow ends up in those spaces, we would miss it here. | 3018 // somehow ends up in those spaces, we would miss it here. |
2951 code_space_->CollectCodeStatistics(); | 3019 heap_data.code_space_->CollectCodeStatistics(); |
2952 lo_space_->CollectCodeStatistics(); | 3020 heap_data.lo_space_->CollectCodeStatistics(); |
2953 PagedSpace::ReportCodeStatistics(); | 3021 PagedSpace::ReportCodeStatistics(); |
2954 } | 3022 } |
2955 | 3023 |
2956 | 3024 |
2957 // This function expects that NewSpace's allocated objects histogram is | 3025 // This function expects that NewSpace's allocated objects histogram is |
2958 // populated (via a call to CollectStatistics or else as a side effect of a | 3026 // populated (via a call to CollectStatistics or else as a side effect of a |
2959 // just-completed scavenge collection). | 3027 // just-completed scavenge collection). |
2960 void Heap::ReportHeapStatistics(const char* title) { | 3028 void Heap::ReportHeapStatistics(const char* title) { |
| 3029 HeapData& heap_data = v8_context()->heap_data_; |
| 3030 |
2961 USE(title); | 3031 USE(title); |
2962 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", | 3032 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", |
2963 title, gc_count_); | 3033 title, heap_data.gc_count_); |
2964 PrintF("mark-compact GC : %d\n", mc_count_); | 3034 PrintF("mark-compact GC : %d\n", heap_data.mc_count_); |
2965 PrintF("old_gen_promotion_limit_ %d\n", old_gen_promotion_limit_); | 3035 PrintF("old_gen_promotion_limit_ %d\n", heap_data.old_gen_promotion_limit_); |
2966 PrintF("old_gen_allocation_limit_ %d\n", old_gen_allocation_limit_); | 3036 PrintF("old_gen_allocation_limit_ %d\n", heap_data.old_gen_allocation_limit_); |
2967 | 3037 |
2968 PrintF("\n"); | 3038 PrintF("\n"); |
2969 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles()); | 3039 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles()); |
2970 GlobalHandles::PrintStats(); | 3040 GlobalHandles::PrintStats(); |
2971 PrintF("\n"); | 3041 PrintF("\n"); |
2972 | 3042 |
2973 PrintF("Heap statistics : "); | 3043 PrintF("Heap statistics : "); |
2974 MemoryAllocator::ReportStatistics(); | 3044 MemoryAllocator::ReportStatistics(); |
2975 PrintF("To space : "); | 3045 PrintF("To space : "); |
2976 new_space_.ReportStatistics(); | 3046 heap_data.new_space_.ReportStatistics(); |
2977 PrintF("Old pointer space : "); | 3047 PrintF("Old pointer space : "); |
2978 old_pointer_space_->ReportStatistics(); | 3048 heap_data.old_pointer_space_->ReportStatistics(); |
2979 PrintF("Old data space : "); | 3049 PrintF("Old data space : "); |
2980 old_data_space_->ReportStatistics(); | 3050 heap_data.old_data_space_->ReportStatistics(); |
2981 PrintF("Code space : "); | 3051 PrintF("Code space : "); |
2982 code_space_->ReportStatistics(); | 3052 heap_data.code_space_->ReportStatistics(); |
2983 PrintF("Map space : "); | 3053 PrintF("Map space : "); |
2984 map_space_->ReportStatistics(); | 3054 heap_data.map_space_->ReportStatistics(); |
2985 PrintF("Cell space : "); | 3055 PrintF("Cell space : "); |
2986 cell_space_->ReportStatistics(); | 3056 heap_data.cell_space_->ReportStatistics(); |
2987 PrintF("Large object space : "); | 3057 PrintF("Large object space : "); |
2988 lo_space_->ReportStatistics(); | 3058 heap_data.lo_space_->ReportStatistics(); |
2989 PrintF(">>>>>> ========================================= >>>>>>\n"); | 3059 PrintF(">>>>>> ========================================= >>>>>>\n"); |
2990 } | 3060 } |
2991 | 3061 |
2992 #endif // DEBUG | 3062 #endif // DEBUG |
2993 | 3063 |
2994 bool Heap::Contains(HeapObject* value) { | 3064 bool Heap::Contains(HeapObject* value) { |
2995 return Contains(value->address()); | 3065 return Contains(value->address()); |
2996 } | 3066 } |
2997 | 3067 |
2998 | 3068 |
2999 bool Heap::Contains(Address addr) { | 3069 bool Heap::Contains(Address addr) { |
3000 if (OS::IsOutsideAllocatedSpace(addr)) return false; | 3070 if (OS::IsOutsideAllocatedSpace(addr)) return false; |
| 3071 HeapData& heap_data = v8_context()->heap_data_; |
3001 return HasBeenSetup() && | 3072 return HasBeenSetup() && |
3002 (new_space_.ToSpaceContains(addr) || | 3073 (heap_data.new_space_.ToSpaceContains(addr) || |
3003 old_pointer_space_->Contains(addr) || | 3074 heap_data.old_pointer_space_->Contains(addr) || |
3004 old_data_space_->Contains(addr) || | 3075 heap_data.old_data_space_->Contains(addr) || |
3005 code_space_->Contains(addr) || | 3076 heap_data.code_space_->Contains(addr) || |
3006 map_space_->Contains(addr) || | 3077 heap_data.map_space_->Contains(addr) || |
3007 cell_space_->Contains(addr) || | 3078 heap_data.cell_space_->Contains(addr) || |
3008 lo_space_->SlowContains(addr)); | 3079 heap_data.lo_space_->SlowContains(addr)); |
3009 } | 3080 } |
3010 | 3081 |
3011 | 3082 |
3012 bool Heap::InSpace(HeapObject* value, AllocationSpace space) { | 3083 bool Heap::InSpace(HeapObject* value, AllocationSpace space) { |
3013 return InSpace(value->address(), space); | 3084 return InSpace(value->address(), space); |
3014 } | 3085 } |
3015 | 3086 |
3016 | 3087 |
3017 bool Heap::InSpace(Address addr, AllocationSpace space) { | 3088 bool Heap::InSpace(Address addr, AllocationSpace space) { |
3018 if (OS::IsOutsideAllocatedSpace(addr)) return false; | 3089 if (OS::IsOutsideAllocatedSpace(addr)) return false; |
3019 if (!HasBeenSetup()) return false; | 3090 if (!HasBeenSetup()) return false; |
3020 | 3091 |
| 3092 HeapData& heap_data = v8_context()->heap_data_; |
3021 switch (space) { | 3093 switch (space) { |
3022 case NEW_SPACE: | 3094 case NEW_SPACE: |
3023 return new_space_.ToSpaceContains(addr); | 3095 return heap_data.new_space_.ToSpaceContains(addr); |
3024 case OLD_POINTER_SPACE: | 3096 case OLD_POINTER_SPACE: |
3025 return old_pointer_space_->Contains(addr); | 3097 return heap_data.old_pointer_space_->Contains(addr); |
3026 case OLD_DATA_SPACE: | 3098 case OLD_DATA_SPACE: |
3027 return old_data_space_->Contains(addr); | 3099 return heap_data.old_data_space_->Contains(addr); |
3028 case CODE_SPACE: | 3100 case CODE_SPACE: |
3029 return code_space_->Contains(addr); | 3101 return heap_data.code_space_->Contains(addr); |
3030 case MAP_SPACE: | 3102 case MAP_SPACE: |
3031 return map_space_->Contains(addr); | 3103 return heap_data.map_space_->Contains(addr); |
3032 case CELL_SPACE: | 3104 case CELL_SPACE: |
3033 return cell_space_->Contains(addr); | 3105 return heap_data.cell_space_->Contains(addr); |
3034 case LO_SPACE: | 3106 case LO_SPACE: |
3035 return lo_space_->SlowContains(addr); | 3107 return heap_data.lo_space_->SlowContains(addr); |
3036 } | 3108 } |
3037 | 3109 |
3038 return false; | 3110 return false; |
3039 } | 3111 } |
3040 | 3112 |
3041 | 3113 |
3042 #ifdef DEBUG | 3114 #ifdef DEBUG |
3043 void Heap::Verify() { | 3115 void Heap::Verify() { |
3044 ASSERT(HasBeenSetup()); | 3116 ASSERT(HasBeenSetup()); |
3045 | 3117 |
| 3118 HeapData& heap_data = v8_context()->heap_data_; |
3046 VerifyPointersVisitor visitor; | 3119 VerifyPointersVisitor visitor; |
3047 IterateRoots(&visitor, VISIT_ONLY_STRONG); | 3120 IterateRoots(&visitor, VISIT_ONLY_STRONG); |
3048 | 3121 |
3049 new_space_.Verify(); | 3122 heap_data.new_space_.Verify(); |
3050 | 3123 |
3051 VerifyPointersAndRSetVisitor rset_visitor; | 3124 VerifyPointersAndRSetVisitor rset_visitor; |
3052 old_pointer_space_->Verify(&rset_visitor); | 3125 heap_data.old_pointer_space_->Verify(&rset_visitor); |
3053 map_space_->Verify(&rset_visitor); | 3126 heap_data.map_space_->Verify(&rset_visitor); |
3054 | 3127 |
3055 VerifyPointersVisitor no_rset_visitor; | 3128 VerifyPointersVisitor no_rset_visitor; |
3056 old_data_space_->Verify(&no_rset_visitor); | 3129 heap_data.old_data_space_->Verify(&no_rset_visitor); |
3057 code_space_->Verify(&no_rset_visitor); | 3130 heap_data.code_space_->Verify(&no_rset_visitor); |
3058 cell_space_->Verify(&no_rset_visitor); | 3131 heap_data.cell_space_->Verify(&no_rset_visitor); |
3059 | 3132 |
3060 lo_space_->Verify(); | 3133 heap_data.lo_space_->Verify(); |
3061 } | 3134 } |
3062 #endif // DEBUG | 3135 #endif // DEBUG |
3063 | 3136 |
3064 | 3137 |
3065 Object* Heap::LookupSymbol(Vector<const char> string) { | 3138 Object* Heap::LookupSymbol(Vector<const char> string) { |
3066 Object* symbol = NULL; | 3139 Object* symbol = NULL; |
3067 Object* new_table = symbol_table()->LookupSymbol(string, &symbol); | 3140 Object* new_table = symbol_table()->LookupSymbol(string, &symbol); |
3068 if (new_table->IsFailure()) return new_table; | 3141 if (new_table->IsFailure()) return new_table; |
3069 // Can't use set_symbol_table because SymbolTable::cast knows that | 3142 // Can't use set_symbol_table because SymbolTable::cast knows that |
3070 // SymbolTable is a singleton and checks for identity. | 3143 // SymbolTable is a singleton and checks for identity. |
3071 roots_[kSymbolTableRootIndex] = new_table; | 3144 v8_context()->heap_data_.roots_[kSymbolTableRootIndex] = new_table; |
3072 ASSERT(symbol != NULL); | 3145 ASSERT(symbol != NULL); |
3073 return symbol; | 3146 return symbol; |
3074 } | 3147 } |
3075 | 3148 |
3076 | 3149 |
3077 Object* Heap::LookupSymbol(String* string) { | 3150 Object* Heap::LookupSymbol(String* string) { |
3078 if (string->IsSymbol()) return string; | 3151 if (string->IsSymbol()) return string; |
3079 Object* symbol = NULL; | 3152 Object* symbol = NULL; |
3080 Object* new_table = symbol_table()->LookupString(string, &symbol); | 3153 Object* new_table = symbol_table()->LookupString(string, &symbol); |
3081 if (new_table->IsFailure()) return new_table; | 3154 if (new_table->IsFailure()) return new_table; |
3082 // Can't use set_symbol_table because SymbolTable::cast knows that | 3155 // Can't use set_symbol_table because SymbolTable::cast knows that |
3083 // SymbolTable is a singleton and checks for identity. | 3156 // SymbolTable is a singleton and checks for identity. |
3084 roots_[kSymbolTableRootIndex] = new_table; | 3157 v8_context()->heap_data_.roots_[kSymbolTableRootIndex] = new_table; |
3085 ASSERT(symbol != NULL); | 3158 ASSERT(symbol != NULL); |
3086 return symbol; | 3159 return symbol; |
3087 } | 3160 } |
3088 | 3161 |
3089 | 3162 |
3090 bool Heap::LookupSymbolIfExists(String* string, String** symbol) { | 3163 bool Heap::LookupSymbolIfExists(String* string, String** symbol) { |
3091 if (string->IsSymbol()) { | 3164 if (string->IsSymbol()) { |
3092 *symbol = string; | 3165 *symbol = string; |
3093 return true; | 3166 return true; |
3094 } | 3167 } |
3095 return symbol_table()->LookupSymbolIfExists(string, symbol); | 3168 return symbol_table()->LookupSymbolIfExists(string, symbol); |
3096 } | 3169 } |
3097 | 3170 |
3098 | 3171 |
3099 #ifdef DEBUG | 3172 #ifdef DEBUG |
3100 void Heap::ZapFromSpace() { | 3173 void Heap::ZapFromSpace() { |
3101 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject()); | 3174 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject()); |
3102 for (Address a = new_space_.FromSpaceLow(); | 3175 HeapData& heap_data = v8_context()->heap_data_; |
3103 a < new_space_.FromSpaceHigh(); | 3176 for (Address a = heap_data.new_space_.FromSpaceLow(); |
| 3177 a < heap_data.new_space_.FromSpaceHigh(); |
3104 a += kPointerSize) { | 3178 a += kPointerSize) { |
3105 Memory::Address_at(a) = kFromSpaceZapValue; | 3179 Memory::Address_at(a) = kFromSpaceZapValue; |
3106 } | 3180 } |
3107 } | 3181 } |
3108 #endif // DEBUG | 3182 #endif // DEBUG |
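
ZapFromSpace overwrites the dead semispace, one pointer-sized word at a time, with a sentinel so that stale pointers into from-space fail loudly under a debugger. A sketch of the technique, with kZapValue standing in for V8's kFromSpaceZapValue:

    #include <cstdint>

    constexpr uintptr_t kZapValue = 0xdeadbeef;  // illustrative sentinel

    void ZapRange(uintptr_t* start, uintptr_t* end) {
      for (uintptr_t* p = start; p < end; ++p) {
        *p = kZapValue;  // any later read of this word is recognizably bogus
      }
    }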
3109 | 3183 |
3110 | 3184 |
3111 int Heap::IterateRSetRange(Address object_start, | 3185 int Heap::IterateRSetRange(Address object_start, |
3112 Address object_end, | 3186 Address object_end, |
3113 Address rset_start, | 3187 Address rset_start, |
(...skipping 29 matching lines...)
3143 // No bits in the word were set. This is the common case. | 3217 // No bits in the word were set. This is the common case. |
3144 object_address += kPointerSize * kBitsPerInt; | 3218 object_address += kPointerSize * kBitsPerInt; |
3145 } | 3219 } |
3146 rset_address += kIntSize; | 3220 rset_address += kIntSize; |
3147 } | 3221 } |
3148 return set_bits_count; | 3222 return set_bits_count; |
3149 } | 3223 } |
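
The tail of IterateRSetRange visible above is the fast path: each remembered-set word covers kBitsPerInt pointer-sized slots, so a zero word advances the object cursor a full stride at once. A simplified sketch of that bitmap scan, with illustrative names that are not V8's:

    #include <cstdint>

    constexpr int kBitsPerWord = 32;

    int ScanBitmap(const uint32_t* words, int word_count, uintptr_t area_start,
                   void (*visit_slot)(uintptr_t address)) {
      int set_bits = 0;
      for (int i = 0; i < word_count; ++i) {
        uint32_t w = words[i];
        if (w == 0) continue;  // common case: skip a whole word of clean slots
        for (int bit = 0; bit < kBitsPerWord; ++bit) {
          if (w & (1u << bit)) {
            visit_slot(area_start + (i * kBitsPerWord + bit) * sizeof(void*));
            ++set_bits;
          }
        }
      }
      return set_bits;  // mirrors the set_bits_count the real loop returns
    }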
3150 | 3224 |
3151 | 3225 |
3152 void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) { | 3226 void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) { |
| 3227 HeapData& heap_data = v8_context()->heap_data_; |
3153 ASSERT(Page::is_rset_in_use()); | 3228 ASSERT(Page::is_rset_in_use()); |
3154 ASSERT(space == old_pointer_space_ || space == map_space_); | 3229 ASSERT(space == heap_data.old_pointer_space_ || |
3155 | 3230 space == heap_data.map_space_); |
3156 static void* paged_rset_histogram = StatsTable::CreateHistogram( | |
3157 "V8.RSetPaged", | |
3158 0, | |
3159 Page::kObjectAreaSize / kPointerSize, | |
3160 30); | |
3161 | 3231 |
3162 PageIterator it(space, PageIterator::PAGES_IN_USE); | 3232 PageIterator it(space, PageIterator::PAGES_IN_USE); |
3163 while (it.has_next()) { | 3233 while (it.has_next()) { |
3164 Page* page = it.next(); | 3234 Page* page = it.next(); |
3165 int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(), | 3235 int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(), |
3166 page->RSetStart(), copy_object_func); | 3236 page->RSetStart(), copy_object_func); |
3167 if (paged_rset_histogram != NULL) { | 3237 if (heap_data.heap_private_data_.paged_rset_histogram_ != NULL) { |
3168 StatsTable::AddHistogramSample(paged_rset_histogram, count); | 3238 StatsTable::AddHistogramSample( |
| 3239 heap_data.heap_private_data_.paged_rset_histogram_, count); |
3169 } | 3240 } |
3170 } | 3241 } |
3171 } | 3242 } |
3172 | 3243 |
3173 | 3244 |
3174 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { | 3245 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { |
3175 IterateStrongRoots(v, mode); | 3246 IterateStrongRoots(v, mode); |
3176 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex])); | 3247 v->VisitPointer(reinterpret_cast<Object**>( |
| 3248 &v8_context()->heap_data_.roots_[kSymbolTableRootIndex])); |
3177 v->Synchronize("symbol_table"); | 3249 v->Synchronize("symbol_table"); |
3178 } | 3250 } |
3179 | 3251 |
3180 | 3252 |
3181 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) { | 3253 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) { |
3182 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]); | 3254 HeapData& heap_data = v8_context()->heap_data_; |
| 3255 v->VisitPointers(&heap_data.roots_[0], |
| 3256 &heap_data.roots_[kStrongRootListLength]); |
3183 v->Synchronize("strong_root_list"); | 3257 v->Synchronize("strong_root_list"); |
3184 | 3258 |
3185 v->VisitPointer(bit_cast<Object**, String**>(&hidden_symbol_)); | 3259 v->VisitPointer(bit_cast<Object**, String**>(&heap_data.hidden_symbol_)); |
3186 v->Synchronize("symbol"); | 3260 v->Synchronize("symbol"); |
3187 | 3261 |
3188 Bootstrapper::Iterate(v); | 3262 Bootstrapper::Iterate(v); |
3189 v->Synchronize("bootstrapper"); | 3263 v->Synchronize("bootstrapper"); |
3190 Top::Iterate(v); | 3264 Top::Iterate(v); |
3191 v->Synchronize("top"); | 3265 v->Synchronize("top"); |
3192 Relocatable::Iterate(v); | 3266 Relocatable::Iterate(v); |
3193 v->Synchronize("relocatable"); | 3267 v->Synchronize("relocatable"); |
3194 | 3268 |
3195 #ifdef ENABLE_DEBUGGER_SUPPORT | 3269 #ifdef ENABLE_DEBUGGER_SUPPORT |
(...skipping 20 matching lines...)
3216 } else { | 3290 } else { |
3217 GlobalHandles::IterateAllRoots(v); | 3291 GlobalHandles::IterateAllRoots(v); |
3218 } | 3292 } |
3219 v->Synchronize("globalhandles"); | 3293 v->Synchronize("globalhandles"); |
3220 | 3294 |
3221 // Iterate over pointers being held by inactive threads. | 3295 // Iterate over pointers being held by inactive threads. |
3222 ThreadManager::Iterate(v); | 3296 ThreadManager::Iterate(v); |
3223 v->Synchronize("threadmanager"); | 3297 v->Synchronize("threadmanager"); |
3224 } | 3298 } |
3225 | 3299 |
3226 | |
3227 // Flag is set when the heap has been configured. The heap can be repeatedly | |
3228 // configured through the API until it is setup. | |
3229 static bool heap_configured = false; | |
3230 | |
3231 // TODO(1236194): Since the heap size is configurable on the command line | 3300 // TODO(1236194): Since the heap size is configurable on the command line |
3232 // and through the API, we should gracefully handle the case that the heap | 3301 // and through the API, we should gracefully handle the case that the heap |
3233 // size is not big enough to fit all the initial objects. | 3302 // size is not big enough to fit all the initial objects. |
3234 bool Heap::ConfigureHeap(int max_semispace_size, int max_old_gen_size) { | 3303 bool Heap::ConfigureHeap(int max_semispace_size, int max_old_gen_size) { |
3235 if (HasBeenSetup()) return false; | 3304 if (HasBeenSetup()) return false; |
3236 | 3305 |
3237 if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size; | 3306 HeapData& heap_data = v8_context()->heap_data_; |
| 3307 if (max_semispace_size > 0) |
| 3308 heap_data.max_semispace_size_ = max_semispace_size; |
3238 | 3309 |
3239 if (Snapshot::IsEnabled()) { | 3310 if (Snapshot::IsEnabled()) { |
3240 // If we are using a snapshot we always reserve the default amount | 3311 // If we are using a snapshot we always reserve the default amount |
3241 // of memory for each semispace because code in the snapshot has | 3312 // of memory for each semispace because code in the snapshot has |
3242 // write-barrier code that relies on the size and alignment of new | 3313 // write-barrier code that relies on the size and alignment of new |
3243 // space. We therefore cannot use a larger max semispace size | 3314 // space. We therefore cannot use a larger max semispace size |
3244 // than the default reserved semispace size. | 3315 // than the default reserved semispace size. |
3245 if (max_semispace_size_ > reserved_semispace_size_) { | 3316 if (heap_data.max_semispace_size_ > heap_data.reserved_semispace_size_) { |
3246 max_semispace_size_ = reserved_semispace_size_; | 3317 heap_data.max_semispace_size_ = heap_data.reserved_semispace_size_; |
3247 } | 3318 } |
3248 } else { | 3319 } else { |
3249 // If we are not using snapshots we reserve space for the actual | 3320 // If we are not using snapshots we reserve space for the actual |
3250 // max semispace size. | 3321 // max semispace size. |
3251 reserved_semispace_size_ = max_semispace_size_; | 3322 heap_data.reserved_semispace_size_ = heap_data.max_semispace_size_; |
3252 } | 3323 } |
3253 | 3324 |
3254 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size; | 3325 if (max_old_gen_size > 0) |
| 3326 heap_data.max_old_generation_size_ = max_old_gen_size; |
3255 | 3327 |
3256 // The new space size must be a power of two to support single-bit testing | 3328 // The new space size must be a power of two to support single-bit testing |
3257 // for containment. | 3329 // for containment. |
3258 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_); | 3330 heap_data.max_semispace_size_ = RoundUpToPowerOf2( |
3259 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_); | 3331 heap_data.max_semispace_size_); |
3260 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_); | 3332 heap_data.reserved_semispace_size_ = RoundUpToPowerOf2( |
3261 external_allocation_limit_ = 10 * max_semispace_size_; | 3333 heap_data.reserved_semispace_size_); |
| 3334 heap_data.initial_semispace_size_ = Min(heap_data.initial_semispace_size_, |
| 3335 heap_data.max_semispace_size_); |
| 3336 heap_data.external_allocation_limit_ = 10 * heap_data.max_semispace_size_; |
3262 | 3337 |
3263 // The old generation is paged. | 3338 // The old generation is paged. |
3264 max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize); | 3339 heap_data.max_old_generation_size_ = RoundUp( |
| 3340 heap_data.max_old_generation_size_, Page::kPageSize); |
3265 | 3341 |
3266 heap_configured = true; | 3342 heap_data.heap_configured_ = true; |
3267 return true; | 3343 return true; |
3268 } | 3344 } |
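
The power-of-two constraint exists so that new-space containment can be decided by a single mask-and-compare against the space's start, assuming the space is aligned to its own size. A sketch of both halves; this RoundUpToPowerOf2 is the standard bit-smearing formulation and may differ from V8's helper:

    #include <cstdint>

    uint32_t RoundUpToPowerOf2(uint32_t x) {  // assumes 0 < x <= 2^31
      x--;
      x |= x >> 1;  x |= x >> 2;  x |= x >> 4;  x |= x >> 8;  x |= x >> 16;
      return x + 1;
    }

    // With start aligned to size (a power of two), membership is one test.
    bool Contains(uintptr_t start, uintptr_t size, uintptr_t address) {
      return (address & ~(size - 1)) == start;
    }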
3269 | 3345 |
3270 | 3346 |
3271 bool Heap::ConfigureHeapDefault() { | 3347 bool Heap::ConfigureHeapDefault() { |
3272 return ConfigureHeap(FLAG_max_new_space_size / 2, FLAG_max_old_space_size); | 3348 return ConfigureHeap(FLAG_max_new_space_size / 2, FLAG_max_old_space_size); |
3273 } | 3349 } |
3274 | 3350 |
3275 | 3351 |
3276 void Heap::RecordStats(HeapStats* stats) { | 3352 void Heap::RecordStats(HeapStats* stats) { |
3277 *stats->start_marker = 0xDECADE00; | 3353 *stats->start_marker = 0xDECADE00; |
3278 *stats->end_marker = 0xDECADE01; | 3354 *stats->end_marker = 0xDECADE01; |
3279 *stats->new_space_size = new_space_.Size(); | 3355 HeapData& heap_data = v8_context()->heap_data_; |
3280 *stats->new_space_capacity = new_space_.Capacity(); | 3356 *stats->new_space_size = heap_data.new_space_.Size(); |
3281 *stats->old_pointer_space_size = old_pointer_space_->Size(); | 3357 *stats->new_space_capacity = heap_data.new_space_.Capacity(); |
3282 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity(); | 3358 *stats->old_pointer_space_size = heap_data.old_pointer_space_->Size(); |
3283 *stats->old_data_space_size = old_data_space_->Size(); | 3359 *stats->old_pointer_space_capacity = heap_data.old_pointer_space_->Capacity(); |
3284 *stats->old_data_space_capacity = old_data_space_->Capacity(); | 3360 *stats->old_data_space_size = heap_data.old_data_space_->Size(); |
3285 *stats->code_space_size = code_space_->Size(); | 3361 *stats->old_data_space_capacity = heap_data.old_data_space_->Capacity(); |
3286 *stats->code_space_capacity = code_space_->Capacity(); | 3362 *stats->code_space_size = heap_data.code_space_->Size(); |
3287 *stats->map_space_size = map_space_->Size(); | 3363 *stats->code_space_capacity = heap_data.code_space_->Capacity(); |
3288 *stats->map_space_capacity = map_space_->Capacity(); | 3364 *stats->map_space_size = heap_data.map_space_->Size(); |
3289 *stats->cell_space_size = cell_space_->Size(); | 3365 *stats->map_space_capacity = heap_data.map_space_->Capacity(); |
3290 *stats->cell_space_capacity = cell_space_->Capacity(); | 3366 *stats->cell_space_size = heap_data.cell_space_->Size(); |
3291 *stats->lo_space_size = lo_space_->Size(); | 3367 *stats->cell_space_capacity = heap_data.cell_space_->Capacity(); |
| 3368 *stats->lo_space_size = heap_data.lo_space_->Size(); |
3292 GlobalHandles::RecordStats(stats); | 3369 GlobalHandles::RecordStats(stats); |
3293 } | 3370 } |
3294 | 3371 |
3295 | 3372 |
3296 int Heap::PromotedSpaceSize() { | 3373 int Heap::PromotedSpaceSize() { |
3297 return old_pointer_space_->Size() | 3374 HeapData& heap_data = v8_context()->heap_data_; |
3298 + old_data_space_->Size() | 3375 return heap_data.old_pointer_space_->Size() |
3299 + code_space_->Size() | 3376 + heap_data.old_data_space_->Size() |
3300 + map_space_->Size() | 3377 + heap_data.code_space_->Size() |
3301 + cell_space_->Size() | 3378 + heap_data.map_space_->Size() |
3302 + lo_space_->Size(); | 3379 + heap_data.cell_space_->Size() |
| 3380 + heap_data.lo_space_->Size(); |
3303 } | 3381 } |
3304 | 3382 |
3305 | 3383 |
3306 int Heap::PromotedExternalMemorySize() { | 3384 int Heap::PromotedExternalMemorySize() { |
3307 if (amount_of_external_allocated_memory_ | 3385 HeapData& heap_data = v8_context()->heap_data_; |
3308 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0; | 3386 if (heap_data.amount_of_external_allocated_memory_ |
3309 return amount_of_external_allocated_memory_ | 3387 <= heap_data.amount_of_external_allocated_memory_at_last_global_gc_ |
3310 - amount_of_external_allocated_memory_at_last_global_gc_; | 3388 ) { |
| 3389 return 0; |
| 3390 } |
| 3391 return heap_data.amount_of_external_allocated_memory_ |
| 3392 - heap_data.amount_of_external_allocated_memory_at_last_global_gc_; |
3311 } | 3393 } |
3312 | 3394 |
3313 | 3395 |
3314 bool Heap::Setup(bool create_heap_objects) { | 3396 bool Heap::Setup(bool create_heap_objects) { |
| 3397 HeapData& heap_data = v8_context()->heap_data_; |
3315 // Initialize heap spaces and initial maps and objects. Whenever something | 3398 // Initialize heap spaces and initial maps and objects. Whenever something |
3316 // goes wrong, just return false. The caller should check the results and | 3399 // goes wrong, just return false. The caller should check the results and |
3317 // call Heap::TearDown() to release allocated memory. | 3400 // call Heap::TearDown() to release allocated memory. |
3318 // | 3401 // |
3319 // If the heap is not yet configured (e.g., through the API), configure it. | 3402 // If the heap is not yet configured (e.g., through the API), configure it. |
3320 // Configuration is based on the flags new-space-size (really the semispace | 3403 // Configuration is based on the flags new-space-size (really the semispace |
3321 // size) and old-space-size if set or the initial values of semispace_size_ | 3404 // size) and old-space-size if set or the initial values of semispace_size_ |
3322 // and old_generation_size_ otherwise. | 3405 // and old_generation_size_ otherwise. |
3323 if (!heap_configured) { | 3406 if (!heap_data.heap_configured_) { |
3324 if (!ConfigureHeapDefault()) return false; | 3407 if (!ConfigureHeapDefault()) return false; |
3325 } | 3408 } |
3326 | 3409 |
3327 // Set up the memory allocator and reserve a chunk of memory for new | 3410 // Set up the memory allocator and reserve a chunk of memory for new |
3328 // space. The chunk is double the size of the requested reserved | 3411 // space. The chunk is double the size of the requested reserved |
3329 // new space size to ensure that we can find a pair of semispaces that | 3412 // new space size to ensure that we can find a pair of semispaces that |
3330 // are contiguous and aligned to their size. | 3413 // are contiguous and aligned to their size. |
3331 if (!MemoryAllocator::Setup(MaxReserved())) return false; | 3414 if (!MemoryAllocator::Setup(MaxReserved())) return false; |
3332 void* chunk = | 3415 void* chunk = MemoryAllocator::ReserveInitialChunk( |
3333 MemoryAllocator::ReserveInitialChunk(4 * reserved_semispace_size_); | 3416 4 * heap_data.reserved_semispace_size_); |
3334 if (chunk == NULL) return false; | 3417 if (chunk == NULL) return false; |
3335 | 3418 |
3336 // Align the pair of semispaces to their size, which must be a power | 3419 // Align the pair of semispaces to their size, which must be a power |
3337 // of 2. | 3420 // of 2. |
3338 Address new_space_start = | 3421 Address new_space_start = |
3339 RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_); | 3422 RoundUp(reinterpret_cast<byte*>(chunk), |
3340 if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) { | 3423 2 * heap_data.reserved_semispace_size_); |
| 3424 if (!heap_data.new_space_.Setup(new_space_start, |
| 3425 2 * heap_data.reserved_semispace_size_)) { |
3341 return false; | 3426 return false; |
3342 } | 3427 } |
3343 | 3428 |
3344 // Initialize old pointer space. | 3429 // Initialize old pointer space. |
3345 old_pointer_space_ = | 3430 heap_data.old_pointer_space_ = new OldSpace( |
3346 new OldSpace(max_old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE); | 3431 heap_data.max_old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE); |
3347 if (old_pointer_space_ == NULL) return false; | 3432 if (heap_data.old_pointer_space_ == NULL) return false; |
3348 if (!old_pointer_space_->Setup(NULL, 0)) return false; | 3433 if (!heap_data.old_pointer_space_->Setup(NULL, 0)) return false; |
3349 | 3434 |
3350 // Initialize old data space. | 3435 // Initialize old data space. |
3351 old_data_space_ = | 3436 heap_data.old_data_space_ = new OldSpace( |
3352 new OldSpace(max_old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE); | 3437 heap_data.max_old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE); |
3353 if (old_data_space_ == NULL) return false; | 3438 if (heap_data.old_data_space_ == NULL) return false; |
3354 if (!old_data_space_->Setup(NULL, 0)) return false; | 3439 if (!heap_data.old_data_space_->Setup(NULL, 0)) return false; |
3355 | 3440 |
3356 // Initialize the code space, set its maximum capacity to the old | 3441 // Initialize the code space, set its maximum capacity to the old |
3357 // generation size. It needs executable memory. | 3442 // generation size. It needs executable memory. |
3358 // On 64-bit platform(s), we put all code objects in a 2 GB range of | 3443 // On 64-bit platform(s), we put all code objects in a 2 GB range of |
3359 // virtual address space, so that they can call each other with near calls. | 3444 // virtual address space, so that they can call each other with near calls. |
3360 if (code_range_size_ > 0) { | 3445 if (heap_data.code_range_size_ > 0) { |
3361 if (!CodeRange::Setup(code_range_size_)) { | 3446 if (!CodeRange::Setup(heap_data.code_range_size_)) { |
3362 return false; | 3447 return false; |
3363 } | 3448 } |
3364 } | 3449 } |
3365 | 3450 |
3366 code_space_ = | 3451 heap_data.code_space_ = |
3367 new OldSpace(max_old_generation_size_, CODE_SPACE, EXECUTABLE); | 3452 new OldSpace(heap_data.max_old_generation_size_, CODE_SPACE, EXECUTABLE); |
3368 if (code_space_ == NULL) return false; | 3453 if (heap_data.code_space_ == NULL) return false; |
3369 if (!code_space_->Setup(NULL, 0)) return false; | 3454 if (!heap_data.code_space_->Setup(NULL, 0)) return false; |
3370 | 3455 |
3371 // Initialize map space. | 3456 // Initialize map space. |
3372 map_space_ = new MapSpace(kMaxMapSpaceSize, MAP_SPACE); | 3457 heap_data.map_space_ = new MapSpace(kMaxMapSpaceSize, MAP_SPACE); |
3373 if (map_space_ == NULL) return false; | 3458 if (heap_data.map_space_ == NULL) return false; |
3374 if (!map_space_->Setup(NULL, 0)) return false; | 3459 if (!heap_data.map_space_->Setup(NULL, 0)) return false; |
3375 | 3460 |
3376 // Initialize global property cell space. | 3461 // Initialize global property cell space. |
3377 cell_space_ = new CellSpace(max_old_generation_size_, CELL_SPACE); | 3462 heap_data.cell_space_ = new CellSpace( |
3378 if (cell_space_ == NULL) return false; | 3463 heap_data.max_old_generation_size_, CELL_SPACE); |
3379 if (!cell_space_->Setup(NULL, 0)) return false; | 3464 if (heap_data.cell_space_ == NULL) return false; |
| 3465 if (!heap_data.cell_space_->Setup(NULL, 0)) return false; |
3380 | 3466 |
3381 // The large object code space may contain code or data. We set the memory | 3467 // The large object code space may contain code or data. We set the memory |
3382 // to be non-executable here for safety, but this means we need to enable it | 3468 // to be non-executable here for safety, but this means we need to enable it |
3383 // explicitly when allocating large code objects. | 3469 // explicitly when allocating large code objects. |
3384 lo_space_ = new LargeObjectSpace(LO_SPACE); | 3470 heap_data.lo_space_ = new LargeObjectSpace(LO_SPACE); |
3385 if (lo_space_ == NULL) return false; | 3471 if (heap_data.lo_space_ == NULL) return false; |
3386 if (!lo_space_->Setup()) return false; | 3472 if (!heap_data.lo_space_->Setup()) return false; |
3387 | 3473 |
3388 if (create_heap_objects) { | 3474 if (create_heap_objects) { |
3389 // Create initial maps. | 3475 // Create initial maps. |
3390 if (!CreateInitialMaps()) return false; | 3476 if (!CreateInitialMaps()) return false; |
3391 if (!CreateApiObjects()) return false; | 3477 if (!CreateApiObjects()) return false; |
3392 | 3478 |
3393 // Create initial objects | 3479 // Create initial objects |
3394 if (!CreateInitialObjects()) return false; | 3480 if (!CreateInitialObjects()) return false; |
3395 } | 3481 } |
3396 | 3482 |
3397 LOG(IntEvent("heap-capacity", Capacity())); | 3483 LOG(IntEvent("heap-capacity", Capacity())); |
3398 LOG(IntEvent("heap-available", Available())); | 3484 LOG(IntEvent("heap-available", Available())); |
3399 | 3485 |
3400 #ifdef ENABLE_LOGGING_AND_PROFILING | 3486 #ifdef ENABLE_LOGGING_AND_PROFILING |
3401 // This should be called only after initial objects have been created. | 3487 // This should be called only after initial objects have been created. |
3402 ProducerHeapProfile::Setup(); | 3488 ProducerHeapProfile::Setup(); |
3403 #endif | 3489 #endif |
3404 | 3490 |
| 3491 if (!heap_data.heap_private_data_.paged_rset_histogram_) { |
| 3492 heap_data.heap_private_data_.paged_rset_histogram_ = |
| 3493 StatsTable::CreateHistogram("V8.RSetPaged", 0, |
| 3494 Page::kObjectAreaSize / kPointerSize, 30); |
| 3495 } |
| 3496 |
3405 return true; | 3497 return true; |
3406 } | 3498 } |
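
Setup reserves four times the reserved semispace size and then rounds the start up to twice that size. Over-reserving by a factor of two is what guarantees that a contiguous, size-aligned semispace pair fits somewhere inside the chunk. A sketch of the arithmetic with illustrative names:

    #include <cstdint>

    uintptr_t AlignUp(uintptr_t p, uintptr_t alignment) {  // power of two
      return (p + alignment - 1) & ~(alignment - 1);
    }

    uintptr_t PlaceSemispacePair(uintptr_t chunk_start, uintptr_t s) {
      uintptr_t start = AlignUp(chunk_start, 2 * s);
      // start <= chunk_start + 2*s - 1, so the 2*s-byte pair ends no later
      // than chunk_start + 4*s - 1 and stays inside the 4*s reservation.
      return start;
    }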
3407 | 3499 |
3408 | 3500 |
3409 void Heap::SetStackLimits() { | 3501 void Heap::SetStackLimits() { |
| 3502 HeapData& heap_data = v8_context()->heap_data_; |
3410 // On 64-bit machines, pointers are generally out of range of Smis. We write | 3503 // On 64-bit machines, pointers are generally out of range of Smis. We write |
3411 // something that looks like an out of range Smi to the GC. | 3504 // something that looks like an out of range Smi to the GC. |
3412 | 3505 |
3413 // Set up the special root array entries containing the stack limits. | 3506 // Set up the special root array entries containing the stack limits. |
3414 // These are actually addresses, but the tag makes the GC ignore it. | 3507 // These are actually addresses, but the tag makes the GC ignore it. |
3415 roots_[kStackLimitRootIndex] = | 3508 heap_data.roots_[kStackLimitRootIndex] = |
3416 reinterpret_cast<Object*>( | 3509 reinterpret_cast<Object*>( |
3417 (StackGuard::jslimit() & ~kSmiTagMask) | kSmiTag); | 3510 (StackGuard::jslimit() & ~kSmiTagMask) | kSmiTag); |
3418 roots_[kRealStackLimitRootIndex] = | 3511 heap_data.roots_[kRealStackLimitRootIndex] = |
3419 reinterpret_cast<Object*>( | 3512 reinterpret_cast<Object*>( |
3420 (StackGuard::real_jslimit() & ~kSmiTagMask) | kSmiTag); | 3513 (StackGuard::real_jslimit() & ~kSmiTagMask) | kSmiTag); |
3421 } | 3514 } |
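
The stack-limit roots hold raw addresses disguised as Smis: clearing the tag bit and applying kSmiTag makes the word read as a small integer, which root iteration will not chase as a pointer. A sketch assuming the one-bit Smi tagging (kSmiTag == 0) V8 used at the time:

    #include <cstdint>

    constexpr uintptr_t kSmiTagMask = 1;
    constexpr uintptr_t kSmiTag = 0;

    uintptr_t DisguiseAsSmi(uintptr_t address) {
      // A cleared low bit says "integer, not heap pointer" to the GC.
      return (address & ~kSmiTagMask) | kSmiTag;
    }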
3422 | 3515 |
3423 | 3516 |
3424 void Heap::TearDown() { | 3517 void Heap::TearDown() { |
3425 GlobalHandles::TearDown(); | 3518 GlobalHandles::TearDown(); |
| 3519 HeapData& heap_data = v8_context()->heap_data_; |
| 3520 heap_data.new_space_.TearDown(); |
3426 | 3521 |
3427 new_space_.TearDown(); | 3522 if (heap_data.old_pointer_space_ != NULL) { |
3428 | 3523 heap_data.old_pointer_space_->TearDown(); |
3429 if (old_pointer_space_ != NULL) { | 3524 delete heap_data.old_pointer_space_; |
3430 old_pointer_space_->TearDown(); | 3525 heap_data.old_pointer_space_ = NULL; |
3431 delete old_pointer_space_; | |
3432 old_pointer_space_ = NULL; | |
3433 } | 3526 } |
3434 | 3527 |
3435 if (old_data_space_ != NULL) { | 3528 if (heap_data.old_data_space_ != NULL) { |
3436 old_data_space_->TearDown(); | 3529 heap_data.old_data_space_->TearDown(); |
3437 delete old_data_space_; | 3530 delete heap_data.old_data_space_; |
3438 old_data_space_ = NULL; | 3531 heap_data.old_data_space_ = NULL; |
3439 } | 3532 } |
3440 | 3533 |
3441 if (code_space_ != NULL) { | 3534 if (heap_data.code_space_ != NULL) { |
3442 code_space_->TearDown(); | 3535 heap_data.code_space_->TearDown(); |
3443 delete code_space_; | 3536 delete heap_data.code_space_; |
3444 code_space_ = NULL; | 3537 heap_data.code_space_ = NULL; |
3445 } | 3538 } |
3446 | 3539 |
3447 if (map_space_ != NULL) { | 3540 if (heap_data.map_space_ != NULL) { |
3448 map_space_->TearDown(); | 3541 heap_data.map_space_->TearDown(); |
3449 delete map_space_; | 3542 delete heap_data.map_space_; |
3450 map_space_ = NULL; | 3543 heap_data.map_space_ = NULL; |
3451 } | 3544 } |
3452 | 3545 |
3453 if (cell_space_ != NULL) { | 3546 if (heap_data.cell_space_ != NULL) { |
3454 cell_space_->TearDown(); | 3547 heap_data.cell_space_->TearDown(); |
3455 delete cell_space_; | 3548 delete heap_data.cell_space_; |
3456 cell_space_ = NULL; | 3549 heap_data.cell_space_ = NULL; |
3457 } | 3550 } |
3458 | 3551 |
3459 if (lo_space_ != NULL) { | 3552 if (heap_data.lo_space_ != NULL) { |
3460 lo_space_->TearDown(); | 3553 heap_data.lo_space_->TearDown(); |
3461 delete lo_space_; | 3554 delete heap_data.lo_space_; |
3462 lo_space_ = NULL; | 3555 heap_data.lo_space_ = NULL; |
3463 } | 3556 } |
3464 | 3557 |
3465 MemoryAllocator::TearDown(); | 3558 MemoryAllocator::TearDown(); |
3466 } | 3559 } |
3467 | 3560 |
3468 | 3561 |
3469 void Heap::Shrink() { | 3562 void Heap::Shrink() { |
3470 // Try to shrink all paged spaces. | 3563 // Try to shrink all paged spaces. |
3471 PagedSpaces spaces; | 3564 PagedSpaces spaces; |
3472 while (PagedSpace* space = spaces.next()) space->Shrink(); | 3565 while (PagedSpace* space = spaces.next()) space->Shrink(); |
(...skipping 218 matching lines...)
3691 | 3784 |
3692 void HeapIterator::reset() { | 3785 void HeapIterator::reset() { |
3693 // Restart the iterator. | 3786 // Restart the iterator. |
3694 Shutdown(); | 3787 Shutdown(); |
3695 Init(); | 3788 Init(); |
3696 } | 3789 } |
3697 | 3790 |
3698 | 3791 |
3699 #ifdef DEBUG | 3792 #ifdef DEBUG |
3700 | 3793 |
3701 static bool search_for_any_global; | |
3702 static Object* search_target; | |
3703 static bool found_target; | |
3704 static List<Object*> object_stack(20); | |
3705 | |
3706 | |
3707 // Tags 0, 1, and 3 are used. Use 2 for marking a visited HeapObject. | 3794 // Tags 0, 1, and 3 are used. Use 2 for marking a visited HeapObject. |
3708 static const int kMarkTag = 2; | 3795 static const int kMarkTag = 2; |
3709 | 3796 |
3710 static void MarkObjectRecursively(Object** p); | |
3711 class MarkObjectVisitor : public ObjectVisitor { | 3797 class MarkObjectVisitor : public ObjectVisitor { |
| 3798 HeapPrivateData* heap_private_data_; |
3712 public: | 3799 public: |
| 3800 explicit MarkObjectVisitor(HeapPrivateData* heap_private_data): |
| 3801 heap_private_data_(heap_private_data) {} |
| 3802 |
3713 void VisitPointers(Object** start, Object** end) { | 3803 void VisitPointers(Object** start, Object** end) { |
3714 // Mark all HeapObject pointers in [start, end) | 3804 // Mark all HeapObject pointers in [start, end) |
3715 for (Object** p = start; p < end; p++) { | 3805 for (Object** p = start; p < end; p++) { |
3716 if ((*p)->IsHeapObject()) | 3806 if ((*p)->IsHeapObject()) |
3717 MarkObjectRecursively(p); | 3807 MarkObjectRecursively(p); |
3718 } | 3808 } |
3719 } | 3809 } |
| 3810 void MarkObjectRecursively(Object** p); |
3720 }; | 3811 }; |
3721 | 3812 |
3722 static MarkObjectVisitor mark_visitor; | 3813 void MarkObjectVisitor::MarkObjectRecursively(Object** p) { |
3723 | |
3724 static void MarkObjectRecursively(Object** p) { | |
3725 if (!(*p)->IsHeapObject()) return; | 3814 if (!(*p)->IsHeapObject()) return; |
3726 | 3815 |
3727 HeapObject* obj = HeapObject::cast(*p); | 3816 HeapObject* obj = HeapObject::cast(*p); |
3728 | 3817 |
3729 Object* map = obj->map(); | 3818 Object* map = obj->map(); |
3730 | 3819 |
3731 if (!map->IsHeapObject()) return; // visited before | 3820 if (!map->IsHeapObject()) return; // visited before |
3732 | 3821 |
3733 if (found_target) return; // stop if target found | 3822 if (heap_private_data_->found_target_) return; // stop if target found |
3734 object_stack.Add(obj); | 3823 heap_private_data_->object_stack_.Add(obj); |
3735 if ((search_for_any_global && obj->IsJSGlobalObject()) || | 3824 if ((heap_private_data_->search_for_any_global_ && obj->IsJSGlobalObject()) || |
3736 (!search_for_any_global && (obj == search_target))) { | 3825 (!heap_private_data_->search_for_any_global_ && |
3737 found_target = true; | 3826 (obj == heap_private_data_->search_target_) |
| 3827 ) |
| 3828 ) { |
| 3829 heap_private_data_->found_target_ = true; |
3738 return; | 3830 return; |
3739 } | 3831 } |
3740 | 3832 |
3741 // not visited yet | 3833 // not visited yet |
3742 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map)); | 3834 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map)); |
3743 | 3835 |
3744 Address map_addr = map_p->address(); | 3836 Address map_addr = map_p->address(); |
3745 | 3837 |
3746 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag)); | 3838 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag)); |
3747 | 3839 |
3748 MarkObjectRecursively(&map); | 3840 MarkObjectRecursively(&map); |
3749 | 3841 |
3750 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p), | 3842 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p), |
3751 &mark_visitor); | 3843 this); |
3752 | 3844 |
3753 if (!found_target) // don't pop if found the target | 3845 if (!heap_private_data_->found_target_) // don't pop if found the target |
3754 object_stack.RemoveLast(); | 3846 heap_private_data_->object_stack_.RemoveLast(); |
3755 } | 3847 } |
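
Marking stores the map's raw address plus kMarkTag back into the map slot. Since tags 0, 1, and 3 already have meanings, tag 2 simultaneously makes the stored word fail IsHeapObject (serving as the "visited before" check at the top of the function) and stays reversible by subtracting the tag. A sketch of the pointer-tagging trick, assuming at least 4-byte alignment; the helpers are illustrative:

    #include <cstdint>

    constexpr uintptr_t kMarkTag = 2;  // tags 0, 1, and 3 are taken

    uintptr_t Mark(uintptr_t raw_map_address) { return raw_map_address + kMarkTag; }
    uintptr_t Unmark(uintptr_t marked)        { return marked - kMarkTag; }
    bool IsMarked(uintptr_t stored)           { return (stored & 3) == kMarkTag; }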
3756 | 3848 |
3757 | 3849 |
3758 static void UnmarkObjectRecursively(Object** p); | |
3759 class UnmarkObjectVisitor : public ObjectVisitor { | 3850 class UnmarkObjectVisitor : public ObjectVisitor { |
3760 public: | 3851 public: |
3761 void VisitPointers(Object** start, Object** end) { | 3852 void VisitPointers(Object** start, Object** end) { |
3762 // Unmark all HeapObject pointers in [start, end) | 3853 // Unmark all HeapObject pointers in [start, end) |
3763 for (Object** p = start; p < end; p++) { | 3854 for (Object** p = start; p < end; p++) { |
3764 if ((*p)->IsHeapObject()) | 3855 if ((*p)->IsHeapObject()) |
3765 UnmarkObjectRecursively(p); | 3856 UnmarkObjectRecursively(p); |
3766 } | 3857 } |
3767 } | 3858 } |
| 3859 void UnmarkObjectRecursively(Object** p); |
3768 }; | 3860 }; |
3769 | 3861 |
3770 static UnmarkObjectVisitor unmark_visitor; | 3862 void UnmarkObjectVisitor::UnmarkObjectRecursively(Object** p) { |
3771 | |
3772 static void UnmarkObjectRecursively(Object** p) { | |
3773 if (!(*p)->IsHeapObject()) return; | 3863 if (!(*p)->IsHeapObject()) return; |
3774 | 3864 |
3775 HeapObject* obj = HeapObject::cast(*p); | 3865 HeapObject* obj = HeapObject::cast(*p); |
3776 | 3866 |
3777 Object* map = obj->map(); | 3867 Object* map = obj->map(); |
3778 | 3868 |
3779 if (map->IsHeapObject()) return; // unmarked already | 3869 if (map->IsHeapObject()) return; // unmarked already |
3780 | 3870 |
3781 Address map_addr = reinterpret_cast<Address>(map); | 3871 Address map_addr = reinterpret_cast<Address>(map); |
3782 | 3872 |
3783 map_addr -= kMarkTag; | 3873 map_addr -= kMarkTag; |
3784 | 3874 |
3785 ASSERT_TAG_ALIGNED(map_addr); | 3875 ASSERT_TAG_ALIGNED(map_addr); |
3786 | 3876 |
3787 HeapObject* map_p = HeapObject::FromAddress(map_addr); | 3877 HeapObject* map_p = HeapObject::FromAddress(map_addr); |
3788 | 3878 |
3789 obj->set_map(reinterpret_cast<Map*>(map_p)); | 3879 obj->set_map(reinterpret_cast<Map*>(map_p)); |
3790 | 3880 |
3791 UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p)); | 3881 UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p)); |
3792 | 3882 |
3793 obj->IterateBody(Map::cast(map_p)->instance_type(), | 3883 obj->IterateBody(Map::cast(map_p)->instance_type(), |
3794 obj->SizeFromMap(Map::cast(map_p)), | 3884 obj->SizeFromMap(Map::cast(map_p)), |
3795 &unmark_visitor); | 3885 this); |
3796 } | 3886 } |
3797 | 3887 |
3798 | 3888 |
3799 static void MarkRootObjectRecursively(Object** root) { | 3889 void HeapPrivateData::MarkRootObjectRecursively(Object** root) { |
3800 if (search_for_any_global) { | 3890 if (search_for_any_global_) { |
3801 ASSERT(search_target == NULL); | 3891 ASSERT(search_target_ == NULL); |
3802 } else { | 3892 } else { |
3803 ASSERT(search_target->IsHeapObject()); | 3893 ASSERT(search_target_->IsHeapObject()); |
3804 } | 3894 } |
3805 found_target = false; | 3895 found_target_ = false; |
3806 object_stack.Clear(); | 3896 object_stack_.Clear(); |
3807 | 3897 |
3808 MarkObjectRecursively(root); | 3898 MarkObjectVisitor mark_object_visitor(this); |
3809 UnmarkObjectRecursively(root); | 3899 mark_object_visitor.MarkObjectRecursively(root); |
| 3900 UnmarkObjectVisitor unmark_visitor; |
| 3901 unmark_visitor.UnmarkObjectRecursively(root); |
3810 | 3902 |
3811 if (found_target) { | 3903 if (found_target_) { |
3812 PrintF("=====================================\n"); | 3904 PrintF("=====================================\n"); |
3813 PrintF("==== Path to object ====\n"); | 3905 PrintF("==== Path to object ====\n"); |
3814 PrintF("=====================================\n\n"); | 3906 PrintF("=====================================\n\n"); |
3815 | 3907 |
3816 ASSERT(!object_stack.is_empty()); | 3908 ASSERT(!object_stack_.is_empty()); |
3817 for (int i = 0; i < object_stack.length(); i++) { | 3909 for (int i = 0; i < object_stack_.length(); i++) { |
3818 if (i > 0) PrintF("\n |\n |\n V\n\n"); | 3910 if (i > 0) PrintF("\n |\n |\n V\n\n"); |
3819 Object* obj = object_stack[i]; | 3911 Object* obj = object_stack_[i]; |
3820 obj->Print(); | 3912 obj->Print(); |
3821 } | 3913 } |
3822 PrintF("=====================================\n"); | 3914 PrintF("=====================================\n"); |
3823 } | 3915 } |
3824 } | 3916 } |
3825 | 3917 |
3826 | 3918 |
3827 // Helper class for visiting HeapObjects recursively. | 3919 // Helper class for visiting HeapObjects recursively. |
3828 class MarkRootVisitor: public ObjectVisitor { | 3920 class MarkRootVisitor: public ObjectVisitor { |
| 3921 HeapPrivateData* heap_private_data_; |
3829 public: | 3922 public: |
| 3923 explicit MarkRootVisitor(HeapPrivateData* heap_private_data): |
| 3924 heap_private_data_(heap_private_data) {} |
3830 void VisitPointers(Object** start, Object** end) { | 3925 void VisitPointers(Object** start, Object** end) { |
3831 // Visit all HeapObject pointers in [start, end) | 3926 // Visit all HeapObject pointers in [start, end) |
3832 for (Object** p = start; p < end; p++) { | 3927 for (Object** p = start; p < end; p++) { |
3833 if ((*p)->IsHeapObject()) | 3928 if ((*p)->IsHeapObject()) |
3834 MarkRootObjectRecursively(p); | 3929 heap_private_data_->MarkRootObjectRecursively(p); |
3835 } | 3930 } |
3836 } | 3931 } |
3837 }; | 3932 }; |
3838 | 3933 |
3839 | 3934 |
3840 // Triggers a depth-first traversal of reachable objects from roots | 3935 // Triggers a depth-first traversal of reachable objects from roots |
3841 // and finds a path to a specific heap object and prints it. | 3936 // and finds a path to a specific heap object and prints it. |
3842 void Heap::TracePathToObject() { | 3937 void Heap::TracePathToObject() { |
3843 search_target = NULL; | 3938 HeapData& heap_data = v8_context()->heap_data_; |
3844 search_for_any_global = false; | 3939 heap_data.heap_private_data_.search_target_ = NULL; |
| 3940 heap_data.heap_private_data_.search_for_any_global_ = false; |
3845 | 3941 |
3846 MarkRootVisitor root_visitor; | 3942 MarkRootVisitor root_visitor(&heap_data.heap_private_data_); |
3847 IterateRoots(&root_visitor, VISIT_ONLY_STRONG); | 3943 IterateRoots(&root_visitor, VISIT_ONLY_STRONG); |
3848 } | 3944 } |
3849 | 3945 |
3850 | 3946 |
3851 // Triggers a depth-first traversal of reachable objects from roots | 3947 // Triggers a depth-first traversal of reachable objects from roots |
3852 // and finds a path to any global object and prints it. Useful for | 3948 // and finds a path to any global object and prints it. Useful for |
3853 // determining the source for leaks of global objects. | 3949 // determining the source for leaks of global objects. |
3854 void Heap::TracePathToGlobal() { | 3950 void Heap::TracePathToGlobal() { |
3855 search_target = NULL; | 3951 HeapData& heap_data = v8_context()->heap_data_; |
3856 search_for_any_global = true; | 3952 heap_data.heap_private_data_.search_target_ = NULL; |
| 3953 heap_data.heap_private_data_.search_for_any_global_ = true; |
3857 | 3954 |
3858 MarkRootVisitor root_visitor; | 3955 MarkRootVisitor root_visitor(&heap_data.heap_private_data_); |
3859 IterateRoots(&root_visitor, VISIT_ONLY_STRONG); | 3956 IterateRoots(&root_visitor, VISIT_ONLY_STRONG); |
3860 } | 3957 } |
3861 #endif | 3958 #endif |
3862 | 3959 |
3863 | 3960 |
3864 GCTracer::GCTracer() | 3961 GCTracer::GCTracer() |
3865 : start_time_(0.0), | 3962 : start_time_(0.0), |
3866 start_size_(0.0), | 3963 start_size_(0.0), |
3867 gc_count_(0), | 3964 gc_count_(0), |
3868 full_gc_count_(0), | 3965 full_gc_count_(0), |
(...skipping 32 matching lines...)
3901 : "Mark-sweep"; | 3998 : "Mark-sweep"; |
3902 } | 3999 } |
3903 return "Unknown GC"; | 4000 return "Unknown GC"; |
3904 } | 4001 } |
3905 | 4002 |
3906 | 4003 |
3907 int KeyedLookupCache::Hash(Map* map, String* name) { | 4004 int KeyedLookupCache::Hash(Map* map, String* name) { |
3908 // Uses only lower 32 bits if pointers are larger. | 4005 // Uses only lower 32 bits if pointers are larger. |
3909 uintptr_t addr_hash = | 4006 uintptr_t addr_hash = |
3910 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> 2; | 4007 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> 2; |
3911 return (addr_hash ^ name->Hash()) % kLength; | 4008 return (addr_hash ^ name->Hash()) % KeyedLookupCacheData::kLength; |
3912 } | 4009 } |
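
The hash drops the two always-zero low bits of the map pointer, mixes in the string's hash, and reduces modulo the cache length. A standalone sketch; kLength here is an illustrative stand-in for KeyedLookupCacheData::kLength:

    #include <cstdint>

    constexpr int kLength = 64;  // illustrative cache size

    int CacheIndex(const void* map, uint32_t name_hash) {
      // Pointer alignment makes the low two bits carry no entropy.
      uintptr_t addr_hash = reinterpret_cast<uintptr_t>(map) >> 2;
      return static_cast<int>((addr_hash ^ name_hash) % kLength);
    }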
3913 | 4010 |
3914 | 4011 |
3915 int KeyedLookupCache::Lookup(Map* map, String* name) { | 4012 int KeyedLookupCache::Lookup(Map* map, String* name) { |
3916 int index = Hash(map, name); | 4013 int index = Hash(map, name); |
3917 Key& key = keys_[index]; | 4014 KeyedLookupCacheData& keyed_lookup_cache_data = |
| 4015 v8_context()->keyed_lookup_cache_data_; |
| 4016 KeyedLookupCacheData::Key& key = keyed_lookup_cache_data.keys_[index]; |
3918 if ((key.map == map) && key.name->Equals(name)) { | 4017 if ((key.map == map) && key.name->Equals(name)) { |
3919 return field_offsets_[index]; | 4018 return keyed_lookup_cache_data.field_offsets_[index]; |
3920 } | 4019 } |
3921 return -1; | 4020 return -1; |
3922 } | 4021 } |
3923 | 4022 |
3924 | 4023 |
3925 void KeyedLookupCache::Update(Map* map, String* name, int field_offset) { | 4024 void KeyedLookupCache::Update(Map* map, String* name, int field_offset) { |
3926 String* symbol; | 4025 String* symbol; |
3927 if (Heap::LookupSymbolIfExists(name, &symbol)) { | 4026 if (Heap::LookupSymbolIfExists(name, &symbol)) { |
3928 int index = Hash(map, symbol); | 4027 int index = Hash(map, symbol); |
3929 Key& key = keys_[index]; | 4028 KeyedLookupCacheData& keyed_lookup_cache_data = |
| 4029 v8_context()->keyed_lookup_cache_data_; |
| 4030 KeyedLookupCacheData::Key& key = keyed_lookup_cache_data.keys_[index]; |
3930 key.map = map; | 4031 key.map = map; |
3931 key.name = symbol; | 4032 key.name = symbol; |
3932 field_offsets_[index] = field_offset; | 4033 keyed_lookup_cache_data.field_offsets_[index] = field_offset; |
3933 } | 4034 } |
3934 } | 4035 } |
3935 | 4036 |
3936 | 4037 |
3937 void KeyedLookupCache::Clear() { | 4038 void KeyedLookupCache::Clear() { |
3938 for (int index = 0; index < kLength; index++) keys_[index].map = NULL; | 4039 KeyedLookupCacheData& keyed_lookup_cache_data = |
| 4040 v8_context()->keyed_lookup_cache_data_; |
| 4041 for (int index = 0; index < KeyedLookupCacheData::kLength; index++) { |
| 4042 keyed_lookup_cache_data.keys_[index].map = NULL; |
| 4043 } |
| 4044 } |
| 4045 |
| 4046 void DescriptorLookupCache::Clear() { |
| 4047 DescriptorLookupCacheData& descriptor_lookup_cache_data = |
| 4048 v8_context()->descriptor_lookup_cache_data_; |
| 4049 for (int index = 0; index < DescriptorLookupCacheData::kLength; index++) { |
| 4050 descriptor_lookup_cache_data.keys_[index].array = NULL; |
| 4051 } |
3939 } | 4052 } |
3940 | 4053 |
3941 | 4054 |
3942 KeyedLookupCache::Key KeyedLookupCache::keys_[KeyedLookupCache::kLength]; | |
3943 | |
3944 | |
3945 int KeyedLookupCache::field_offsets_[KeyedLookupCache::kLength]; | |
3946 | |
3947 | |
3948 void DescriptorLookupCache::Clear() { | |
3949 for (int index = 0; index < kLength; index++) keys_[index].array = NULL; | |
3950 } | |
3951 | |
3952 | |
3953 DescriptorLookupCache::Key | |
3954 DescriptorLookupCache::keys_[DescriptorLookupCache::kLength]; | |
3955 | |
3956 int DescriptorLookupCache::results_[DescriptorLookupCache::kLength]; | |
3957 | 4055 |
3958 | 4056 |
3959 #ifdef DEBUG | 4057 #ifdef DEBUG |
3960 bool Heap::GarbageCollectionGreedyCheck() { | 4058 bool Heap::GarbageCollectionGreedyCheck() { |
3961 ASSERT(FLAG_gc_greedy); | 4059 ASSERT(FLAG_gc_greedy); |
3962 if (Bootstrapper::IsActive()) return true; | 4060 if (Bootstrapper::IsActive()) return true; |
3963 if (disallow_allocation_failure()) return true; | 4061 if (disallow_allocation_failure()) return true; |
3964 return CollectGarbage(0, NEW_SPACE); | 4062 return CollectGarbage(0, NEW_SPACE); |
3965 } | 4063 } |
3966 #endif | 4064 #endif |
3967 | 4065 |
3968 | 4066 |
3969 TranscendentalCache::TranscendentalCache(TranscendentalCache::Type t) | 4067 TranscendentalCache::TranscendentalCache(Type t) |
3970 : type_(t) { | 4068 : type_(t) { |
3971 uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't | 4069 uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't |
3972 uint32_t in1 = 0xffffffffu; // generated by the FPU. | 4070 uint32_t in1 = 0xffffffffu; // generated by the FPU. |
3973 for (int i = 0; i < kCacheSize; i++) { | 4071 for (int i = 0; i < kCacheSize; i++) { |
3974 elements_[i].in[0] = in0; | 4072 elements_[i].in[0] = in0; |
3975 elements_[i].in[1] = in1; | 4073 elements_[i].in[1] = in1; |
3976 elements_[i].output = NULL; | 4074 elements_[i].output = NULL; |
3977 } | 4075 } |
3978 } | 4076 } |
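
Seeding every entry with 0xffffffff in both input halves plants a NaN bit pattern the FPU never generates, so a lookup that compares raw input halves can never match an untouched slot. A loose sketch of the entry layout and the emptiness test, not V8's exact structure:

    #include <cstdint>

    struct Element {
      uint32_t in[2];  // the two 32-bit halves of the double-typed input
      void* output;    // cached result, NULL until the slot is filled
    };

    bool IsEmptySlot(const Element& e) {
      return e.in[0] == 0xffffffffu && e.in[1] == 0xffffffffu;
    }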
3979 | 4077 |
3980 | 4078 |
3981 TranscendentalCache* TranscendentalCache::caches_[kNumberOfCaches]; | |
3982 | 4079 |
3983 | 4080 |
3984 void TranscendentalCache::Clear() { | 4081 void TranscendentalCache::Clear() { |
| 4082 TranscendentalCacheData& transcendental_cache_data = |
| 4083 v8_context()->transcendental_cache_data_; |
3985 for (int i = 0; i < kNumberOfCaches; i++) { | 4084 for (int i = 0; i < kNumberOfCaches; i++) { |
3986 if (caches_[i] != NULL) { | 4085 if (transcendental_cache_data.caches_[i] != NULL) { |
3987 delete caches_[i]; | 4086 delete transcendental_cache_data.caches_[i]; |
3988 caches_[i] = NULL; | 4087 transcendental_cache_data.caches_[i] = NULL; |
3989 } | 4088 } |
3990 } | 4089 } |
3991 } | 4090 } |
3992 | 4091 |
3993 | 4092 |
3994 } } // namespace v8::internal | 4093 } } // namespace v8::internal |