// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_H_
#define V8_HEAP_H_

#include <cmath>

#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/counters.h"
#include "src/gc-tracer.h"
#include "src/globals.h"
#include "src/incremental-marking.h"
#include "src/list.h"
#include "src/mark-compact.h"
#include "src/objects-visiting.h"
#include "src/spaces.h"
#include "src/splay-tree-inl.h"
#include "src/store-buffer.h"

namespace v8 {
namespace internal {

// Defines all the roots in Heap.
#define STRONG_ROOT_LIST(V) \
  V(Map, byte_array_map, ByteArrayMap) \
  V(Map, free_space_map, FreeSpaceMap) \
  V(Map, one_pointer_filler_map, OnePointerFillerMap) \
  V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
  /* Cluster the most popular ones in a few cache lines here at the top. */ \
  V(Smi, store_buffer_top, StoreBufferTop) \
  V(Oddball, undefined_value, UndefinedValue) \
  V(Oddball, the_hole_value, TheHoleValue) \
  V(Oddball, null_value, NullValue) \
  V(Oddball, true_value, TrueValue) \
  V(Oddball, false_value, FalseValue) \
  V(Oddball, uninitialized_value, UninitializedValue) \
  V(Oddball, exception, Exception) \
  V(Map, cell_map, CellMap) \
  V(Map, global_property_cell_map, GlobalPropertyCellMap) \
  V(Map, shared_function_info_map, SharedFunctionInfoMap) \
  V(Map, meta_map, MetaMap) \
  V(Map, heap_number_map, HeapNumberMap) \
  V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
  V(Map, native_context_map, NativeContextMap) \
  V(Map, fixed_array_map, FixedArrayMap) \
  V(Map, code_map, CodeMap) \
  V(Map, scope_info_map, ScopeInfoMap) \
  V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
  V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
  V(Map, constant_pool_array_map, ConstantPoolArrayMap) \
  V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
  V(Map, hash_table_map, HashTableMap) \
  V(Map, ordered_hash_table_map, OrderedHashTableMap) \
  V(FixedArray, empty_fixed_array, EmptyFixedArray) \
  V(ByteArray, empty_byte_array, EmptyByteArray) \
  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
  V(ConstantPoolArray, empty_constant_pool_array, EmptyConstantPoolArray) \
  V(Oddball, arguments_marker, ArgumentsMarker) \
  /* The roots above this line should be boring from a GC point of view. */ \
  /* This means they are never in new space and never on a page that is */ \
  /* being compacted. */ \
  V(FixedArray, number_string_cache, NumberStringCache) \
  V(Object, instanceof_cache_function, InstanceofCacheFunction) \
  V(Object, instanceof_cache_map, InstanceofCacheMap) \
  V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
  V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
  V(FixedArray, string_split_cache, StringSplitCache) \
  V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
  V(Oddball, termination_exception, TerminationException) \
  V(Smi, hash_seed, HashSeed) \
  V(Map, symbol_map, SymbolMap) \
  V(Map, string_map, StringMap) \
  V(Map, ascii_string_map, AsciiStringMap) \
  V(Map, cons_string_map, ConsStringMap) \
  V(Map, cons_ascii_string_map, ConsAsciiStringMap) \
  V(Map, sliced_string_map, SlicedStringMap) \
  V(Map, sliced_ascii_string_map, SlicedAsciiStringMap) \
  V(Map, external_string_map, ExternalStringMap) \
  V(Map, external_string_with_one_byte_data_map, \
    ExternalStringWithOneByteDataMap) \
  V(Map, external_ascii_string_map, ExternalAsciiStringMap) \
  V(Map, short_external_string_map, ShortExternalStringMap) \
  V(Map, short_external_string_with_one_byte_data_map, \
    ShortExternalStringWithOneByteDataMap) \
  V(Map, internalized_string_map, InternalizedStringMap) \
  V(Map, ascii_internalized_string_map, AsciiInternalizedStringMap) \
  V(Map, external_internalized_string_map, ExternalInternalizedStringMap) \
  V(Map, external_internalized_string_with_one_byte_data_map, \
    ExternalInternalizedStringWithOneByteDataMap) \
  V(Map, external_ascii_internalized_string_map, \
    ExternalAsciiInternalizedStringMap) \
  V(Map, short_external_internalized_string_map, \
    ShortExternalInternalizedStringMap) \
  V(Map, short_external_internalized_string_with_one_byte_data_map, \
    ShortExternalInternalizedStringWithOneByteDataMap) \
  V(Map, short_external_ascii_internalized_string_map, \
    ShortExternalAsciiInternalizedStringMap) \
  V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap) \
  V(Map, undetectable_string_map, UndetectableStringMap) \
  V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
  V(Map, external_int8_array_map, ExternalInt8ArrayMap) \
  V(Map, external_uint8_array_map, ExternalUint8ArrayMap) \
  V(Map, external_int16_array_map, ExternalInt16ArrayMap) \
  V(Map, external_uint16_array_map, ExternalUint16ArrayMap) \
  V(Map, external_int32_array_map, ExternalInt32ArrayMap) \
  V(Map, external_uint32_array_map, ExternalUint32ArrayMap) \
  V(Map, external_float32_array_map, ExternalFloat32ArrayMap) \
  V(Map, external_float64_array_map, ExternalFloat64ArrayMap) \
  V(Map, external_uint8_clamped_array_map, ExternalUint8ClampedArrayMap) \
  V(ExternalArray, empty_external_int8_array, EmptyExternalInt8Array) \
  V(ExternalArray, empty_external_uint8_array, EmptyExternalUint8Array) \
  V(ExternalArray, empty_external_int16_array, EmptyExternalInt16Array) \
  V(ExternalArray, empty_external_uint16_array, EmptyExternalUint16Array) \
  V(ExternalArray, empty_external_int32_array, EmptyExternalInt32Array) \
  V(ExternalArray, empty_external_uint32_array, EmptyExternalUint32Array) \
  V(ExternalArray, empty_external_float32_array, EmptyExternalFloat32Array) \
  V(ExternalArray, empty_external_float64_array, EmptyExternalFloat64Array) \
  V(ExternalArray, empty_external_uint8_clamped_array, \
    EmptyExternalUint8ClampedArray) \
  V(Map, fixed_uint8_array_map, FixedUint8ArrayMap) \
  V(Map, fixed_int8_array_map, FixedInt8ArrayMap) \
  V(Map, fixed_uint16_array_map, FixedUint16ArrayMap) \
  V(Map, fixed_int16_array_map, FixedInt16ArrayMap) \
  V(Map, fixed_uint32_array_map, FixedUint32ArrayMap) \
  V(Map, fixed_int32_array_map, FixedInt32ArrayMap) \
  V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
  V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
  V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
  V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
  V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \
  V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array) \
  V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array) \
  V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array) \
  V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array) \
  V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array) \
  V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \
  V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \
    EmptyFixedUint8ClampedArray) \
  V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
  V(Map, function_context_map, FunctionContextMap) \
  V(Map, catch_context_map, CatchContextMap) \
  V(Map, with_context_map, WithContextMap) \
  V(Map, block_context_map, BlockContextMap) \
  V(Map, module_context_map, ModuleContextMap) \
  V(Map, global_context_map, GlobalContextMap) \
  V(Map, undefined_map, UndefinedMap) \
  V(Map, the_hole_map, TheHoleMap) \
  V(Map, null_map, NullMap) \
  V(Map, boolean_map, BooleanMap) \
  V(Map, uninitialized_map, UninitializedMap) \
  V(Map, arguments_marker_map, ArgumentsMarkerMap) \
  V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap) \
  V(Map, exception_map, ExceptionMap) \
  V(Map, termination_exception_map, TerminationExceptionMap) \
  V(Map, message_object_map, JSMessageObjectMap) \
  V(Map, foreign_map, ForeignMap) \
  V(HeapNumber, nan_value, NanValue) \
  V(HeapNumber, infinity_value, InfinityValue) \
  V(HeapNumber, minus_zero_value, MinusZeroValue) \
  V(Map, neander_map, NeanderMap) \
  V(JSObject, message_listeners, MessageListeners) \
  V(UnseededNumberDictionary, code_stubs, CodeStubs) \
  V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
  V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache) \
  V(Code, js_entry_code, JsEntryCode) \
  V(Code, js_construct_entry_code, JsConstructEntryCode) \
  V(FixedArray, natives_source_cache, NativesSourceCache) \
  V(Script, empty_script, EmptyScript) \
  V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
  V(Cell, undefined_cell, UndefineCell) \
  V(JSObject, observation_state, ObservationState) \
  V(Map, external_map, ExternalMap) \
  V(Object, symbol_registry, SymbolRegistry) \
  V(Symbol, frozen_symbol, FrozenSymbol) \
  V(Symbol, nonexistent_symbol, NonExistentSymbol) \
  V(Symbol, elements_transition_symbol, ElementsTransitionSymbol) \
  V(SeededNumberDictionary, empty_slow_element_dictionary, \
    EmptySlowElementDictionary) \
  V(Symbol, observed_symbol, ObservedSymbol) \
  V(Symbol, uninitialized_symbol, UninitializedSymbol) \
  V(Symbol, megamorphic_symbol, MegamorphicSymbol) \
  V(Symbol, stack_trace_symbol, StackTraceSymbol) \
  V(Symbol, detailed_stack_trace_symbol, DetailedStackTraceSymbol) \
  V(Symbol, normal_ic_symbol, NormalICSymbol) \
  V(FixedArray, materialized_objects, MaterializedObjects) \
  V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad) \
  V(FixedArray, microtask_queue, MicrotaskQueue)
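
// For illustration only: a sketch of how these X-macro lists expand. Applying
// the ROOT_ACCESSOR macro defined in class Heap below to a single entry such
// as V(Map, byte_array_map, ByteArrayMap) generates a typed getter:
//
//   Map* byte_array_map() {
//     return Map::cast(roots_[kByteArrayMapRootIndex]);
//   }
//
// with kByteArrayMapRootIndex supplied by the RootListIndex enum, which is
// generated from the same list further down.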

// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
  V(Smi, stack_limit, StackLimit) \
  V(Smi, real_stack_limit, RealStackLimit) \
  V(Smi, last_script_id, LastScriptId) \
  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)

#define ROOT_LIST(V) \
  STRONG_ROOT_LIST(V) \
  SMI_ROOT_LIST(V) \
  V(StringTable, string_table, StringTable)

// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers.
#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
  V(byte_array_map) \
  V(free_space_map) \
  V(one_pointer_filler_map) \
  V(two_pointer_filler_map) \
  V(undefined_value) \
  V(the_hole_value) \
  V(null_value) \
  V(true_value) \
  V(false_value) \
  V(uninitialized_value) \
  V(cell_map) \
  V(global_property_cell_map) \
  V(shared_function_info_map) \
  V(meta_map) \
  V(heap_number_map) \
  V(mutable_heap_number_map) \
  V(native_context_map) \
  V(fixed_array_map) \
  V(code_map) \
  V(scope_info_map) \
  V(fixed_cow_array_map) \
  V(fixed_double_array_map) \
  V(constant_pool_array_map) \
  V(no_interceptor_result_sentinel) \
  V(hash_table_map) \
  V(ordered_hash_table_map) \
  V(empty_fixed_array) \
  V(empty_byte_array) \
  V(empty_descriptor_array) \
  V(empty_constant_pool_array) \
  V(arguments_marker) \
  V(symbol_map) \
  V(sloppy_arguments_elements_map) \
  V(function_context_map) \
  V(catch_context_map) \
  V(with_context_map) \
  V(block_context_map) \
  V(module_context_map) \
  V(global_context_map) \
  V(undefined_map) \
  V(the_hole_map) \
  V(null_map) \
  V(boolean_map) \
  V(uninitialized_map) \
  V(message_object_map) \
  V(foreign_map) \
  V(neander_map)
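
// A minimal sketch of how a client consumes this list (hypothetical helper;
// it relies only on the lowercase root accessors generated by ROOT_ACCESSOR):
//
//   static bool IsImmortalImmovable(Heap* heap, Object* object) {
//   #define TEST_ROOT(name) if (object == heap->name()) return true;
//     IMMORTAL_IMMOVABLE_ROOT_LIST(TEST_ROOT)
//   #undef TEST_ROOT
//     return false;
//   }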

#define INTERNALIZED_STRING_LIST(V) \
  V(Array_string, "Array") \
  V(Object_string, "Object") \
  V(proto_string, "__proto__") \
  V(arguments_string, "arguments") \
  V(Arguments_string, "Arguments") \
  V(call_string, "call") \
  V(apply_string, "apply") \
  V(caller_string, "caller") \
  V(boolean_string, "boolean") \
  V(Boolean_string, "Boolean") \
  V(callee_string, "callee") \
  V(constructor_string, "constructor") \
  V(dot_result_string, ".result") \
  V(dot_for_string, ".for.") \
  V(eval_string, "eval") \
  V(empty_string, "") \
  V(function_string, "function") \
  V(length_string, "length") \
  V(name_string, "name") \
  V(null_string, "null") \
  V(number_string, "number") \
  V(Number_string, "Number") \
  V(nan_string, "NaN") \
  V(RegExp_string, "RegExp") \
  V(source_string, "source") \
  V(source_url_string, "source_url") \
  V(source_mapping_url_string, "source_mapping_url") \
  V(global_string, "global") \
  V(ignore_case_string, "ignoreCase") \
  V(multiline_string, "multiline") \
  V(input_string, "input") \
  V(index_string, "index") \
  V(last_index_string, "lastIndex") \
  V(object_string, "object") \
  V(literals_string, "literals") \
  V(prototype_string, "prototype") \
  V(string_string, "string") \
  V(String_string, "String") \
  V(symbol_string, "symbol") \
  V(Symbol_string, "Symbol") \
  V(for_string, "for") \
  V(for_api_string, "for_api") \
  V(for_intern_string, "for_intern") \
  V(private_api_string, "private_api") \
  V(private_intern_string, "private_intern") \
  V(Date_string, "Date") \
  V(to_string_string, "toString") \
  V(char_at_string, "CharAt") \
  V(undefined_string, "undefined") \
  V(value_of_string, "valueOf") \
  V(stack_string, "stack") \
  V(toJSON_string, "toJSON") \
  V(InitializeVarGlobal_string, "InitializeVarGlobal") \
  V(InitializeConstGlobal_string, "InitializeConstGlobal") \
  V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
  V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
  V(stack_overflow_string, "kStackOverflowBoilerplate") \
  V(illegal_access_string, "illegal access") \
  V(get_string, "get") \
  V(set_string, "set") \
  V(map_field_string, "%map") \
  V(elements_field_string, "%elements") \
  V(length_field_string, "%length") \
  V(cell_value_string, "%cell_value") \
  V(function_class_string, "Function") \
  V(illegal_argument_string, "illegal argument") \
  V(space_string, " ") \
  V(exec_string, "exec") \
  V(zero_string, "0") \
  V(global_eval_string, "GlobalEval") \
  V(identity_hash_string, "v8::IdentityHash") \
  V(closure_string, "(closure)") \
  V(dot_string, ".") \
  V(compare_ic_string, "==") \
  V(strict_compare_ic_string, "===") \
  V(infinity_string, "Infinity") \
  V(minus_infinity_string, "-Infinity") \
  V(query_colon_string, "(?:)") \
  V(Generator_string, "Generator") \
  V(throw_string, "throw") \
  V(done_string, "done") \
  V(value_string, "value") \
  V(next_string, "next") \
  V(byte_length_string, "byteLength") \
  V(byte_offset_string, "byteOffset") \
  V(buffer_string, "buffer") \
  V(intl_initialized_marker_string, "v8::intl_initialized_marker") \
  V(intl_impl_object_string, "v8::intl_object")

// Forward declarations.
class HeapStats;
class Isolate;
class WeakObjectRetainer;


typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
                                                      Object** pointer);

class StoreBufferRebuilder {
 public:
  explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
      : store_buffer_(store_buffer) {
  }

  void Callback(MemoryChunk* page, StoreBufferEvent event);

 private:
  StoreBuffer* store_buffer_;

  // We record in this variable how full the store buffer was when we started
  // iterating over the current page, finding pointers to new space. If the
  // store buffer overflows again we can exempt the page from the store buffer
  // by rewinding to this point instead of having to search the store buffer.
  Object*** start_of_current_page_;
  // The current page we are scanning in the store buffer iterator.
  MemoryChunk* current_page_;
};


// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
 public:
  explicit PromotionQueue(Heap* heap)
      : front_(NULL),
        rear_(NULL),
        limit_(NULL),
        emergency_stack_(0),
        heap_(heap) { }

  void Initialize();

  void Destroy() {
    DCHECK(is_empty());
    delete emergency_stack_;
    emergency_stack_ = NULL;
  }

  inline void ActivateGuardIfOnTheSamePage();

  Page* GetHeadPage() {
    return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
  }

  void SetNewLimit(Address limit) {
    if (!guard_) {
      return;
    }

    DCHECK(GetHeadPage() == Page::FromAllocationTop(limit));
    limit_ = reinterpret_cast<intptr_t*>(limit);

    if (limit_ <= rear_) {
      return;
    }

    RelocateQueueHead();
  }

  bool IsBelowPromotionQueue(Address to_space_top) {
    // If the given to-space top pointer and the head of the promotion queue
    // are not on the same page, then the to-space objects are below the
    // promotion queue.
    if (GetHeadPage() != Page::FromAddress(to_space_top)) {
      return true;
    }
    // If the to-space top pointer is less than or equal to the promotion
    // queue head, then the to-space objects are below the promotion queue.
    return reinterpret_cast<intptr_t*>(to_space_top) <= rear_;
  }

  bool is_empty() {
    return (front_ == rear_) &&
           (emergency_stack_ == NULL || emergency_stack_->length() == 0);
  }

  inline void insert(HeapObject* target, int size);

  void remove(HeapObject** target, int* size) {
    DCHECK(!is_empty());
    if (front_ == rear_) {
      Entry e = emergency_stack_->RemoveLast();
      *target = e.obj_;
      *size = e.size_;
      return;
    }

    if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
      NewSpacePage* front_page =
          NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
      DCHECK(!front_page->prev_page()->is_anchor());
      front_ =
          reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
    }
    *target = reinterpret_cast<HeapObject*>(*(--front_));
    *size = static_cast<int>(*(--front_));
    // Assert no underflow.
    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
                                reinterpret_cast<Address>(front_));
  }

 private:
  // The front of the queue is higher in the memory page chain than the rear.
  intptr_t* front_;
  intptr_t* rear_;
  intptr_t* limit_;

  bool guard_;

  static const int kEntrySizeInWords = 2;

  struct Entry {
    Entry(HeapObject* obj, int size) : obj_(obj), size_(size) { }

    HeapObject* obj_;
    int size_;
  };
  List<Entry>* emergency_stack_;

  Heap* heap_;

  void RelocateQueueHead();

  DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
};
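
// Typical use during a scavenge (sketch): promoted objects are pushed with
// their size, then popped and re-scanned until the queue drains.
//
//   promotion_queue()->insert(target, object_size);
//   ...
//   while (!promotion_queue()->is_empty()) {
//     HeapObject* obj;
//     int size;
//     promotion_queue()->remove(&obj, &size);
//     // Re-scan the object body for pointers into new space.
//   }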


typedef void (*ScavengingCallback)(Map* map,
                                   HeapObject** slot,
                                   HeapObject* object);


// External strings table is a place where all external strings are
// registered. We need to keep track of such strings to properly
// finalize them.
class ExternalStringTable {
 public:
  // Registers an external string.
  inline void AddString(String* string);

  inline void Iterate(ObjectVisitor* v);

  // Restores internal invariant and gets rid of collected strings.
  // Must be called after each Iterate() that modified the strings.
  void CleanUp();

  // Destroys all allocated memory.
  void TearDown();

 private:
  explicit ExternalStringTable(Heap* heap) : heap_(heap) { }

  friend class Heap;

  inline void Verify();

  inline void AddOldString(String* string);

  // Notifies the table that only a prefix of the new list is valid.
  inline void ShrinkNewStrings(int position);

  // To speed up scavenge collections, new space strings are kept
  // separate from old space strings.
  List<Object*> new_space_strings_;
  List<Object*> old_space_strings_;

  Heap* heap_;

  DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
};
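
// Usage sketch: after a GC pass uses Iterate() to update the registered
// string slots in place, CleanUp() must run to drop entries that died and to
// move promoted new-space strings onto the old-space list:
//
//   external_string_table()->Iterate(&updating_visitor);  // hypothetical visitor
//   external_string_table()->CleanUp();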


enum ArrayStorageAllocationMode {
  DONT_INITIALIZE_ARRAY_ELEMENTS,
  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};


class Heap {
 public:
  // Configure heap size in MB before setup. Return false if the heap has been
  // set up already.
  bool ConfigureHeap(int max_semi_space_size,
                     int max_old_space_size,
                     int max_executable_size,
                     size_t code_range_size);
  bool ConfigureHeapDefault();
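
  // Illustrative call (sketch; sizes are in MB as noted above, and 0 is
  // assumed here to request the default code range):
  //
  //   heap->ConfigureHeap(8,    // max semi-space size
  //                       700,  // max old space size
  //                       256,  // max executable size
  //                       0);   // code range size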

  // Prepares the heap, setting up memory areas that are needed in the isolate
  // without actually creating any objects.
  bool SetUp();

  // Bootstraps the object heap with the core set of objects required to run.
  // Returns whether it succeeded.
  bool CreateHeapObjects();

  // Destroys all memory allocated by the heap.
  void TearDown();

  // Set the stack limit in the roots_ array. Some architectures generate
  // code that looks here, because it is faster than loading from the static
  // jslimit_/real_jslimit_ variable in the StackGuard.
  void SetStackLimits();

  // Returns whether SetUp has been called.
  bool HasBeenSetUp();

  // Returns the maximum amount of memory reserved for the heap. For
  // the young generation, we reserve 4 times the amount needed for a
  // semi space. The young generation consists of two semi spaces and
  // we reserve twice the amount needed for those in order to ensure
  // that new space can be aligned to its size.
  intptr_t MaxReserved() {
    return 4 * reserved_semispace_size_ + max_old_generation_size_;
  }
  int MaxSemiSpaceSize() { return max_semi_space_size_; }
  int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
  int InitialSemiSpaceSize() { return initial_semispace_size_; }
  intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
  intptr_t MaxExecutableSize() { return max_executable_size_; }

  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
  // more spaces are needed until it reaches the limit.
  intptr_t Capacity();

  // Returns the amount of memory currently committed for the heap.
  intptr_t CommittedMemory();

  // Returns the amount of executable memory currently committed for the heap.
  intptr_t CommittedMemoryExecutable();

  // Returns the amount of physical memory currently committed for the heap.
  size_t CommittedPhysicalMemory();

  // Returns the maximum amount of memory ever committed for the heap.
  intptr_t MaximumCommittedMemory() { return maximum_committed_; }

  // Updates the maximum committed memory for the heap. Should be called
  // whenever a space grows.
  void UpdateMaximumCommitted();

  // Returns the available bytes in space w/o growing.
  // Heap doesn't guarantee that it can allocate an object that requires
  // all available bytes. Check MaxHeapObjectSize() instead.
  intptr_t Available();

  // Returns the size of all objects residing in the heap.
  intptr_t SizeOfObjects();

  // Return the starting address and a mask for the new space. And-masking an
  // address with the mask will result in the start address of the new space
  // for all addresses in either semispace.
  Address NewSpaceStart() { return new_space_.start(); }
  uintptr_t NewSpaceMask() { return new_space_.mask(); }
  Address NewSpaceTop() { return new_space_.top(); }

  NewSpace* new_space() { return &new_space_; }
  OldSpace* old_pointer_space() { return old_pointer_space_; }
  OldSpace* old_data_space() { return old_data_space_; }
  OldSpace* code_space() { return code_space_; }
  MapSpace* map_space() { return map_space_; }
  CellSpace* cell_space() { return cell_space_; }
  PropertyCellSpace* property_cell_space() {
    return property_cell_space_;
  }
  LargeObjectSpace* lo_space() { return lo_space_; }
  PagedSpace* paged_space(int idx) {
    switch (idx) {
      case OLD_POINTER_SPACE:
        return old_pointer_space();
      case OLD_DATA_SPACE:
        return old_data_space();
      case MAP_SPACE:
        return map_space();
      case CELL_SPACE:
        return cell_space();
      case PROPERTY_CELL_SPACE:
        return property_cell_space();
      case CODE_SPACE:
        return code_space();
      case NEW_SPACE:
      case LO_SPACE:
        UNREACHABLE();
    }
    return NULL;
  }

  bool always_allocate() { return always_allocate_scope_depth_ != 0; }
  Address always_allocate_scope_depth_address() {
    return reinterpret_cast<Address>(&always_allocate_scope_depth_);
  }

  Address* NewSpaceAllocationTopAddress() {
    return new_space_.allocation_top_address();
  }
  Address* NewSpaceAllocationLimitAddress() {
    return new_space_.allocation_limit_address();
  }

  Address* OldPointerSpaceAllocationTopAddress() {
    return old_pointer_space_->allocation_top_address();
  }
  Address* OldPointerSpaceAllocationLimitAddress() {
    return old_pointer_space_->allocation_limit_address();
  }

  Address* OldDataSpaceAllocationTopAddress() {
    return old_data_space_->allocation_top_address();
  }
  Address* OldDataSpaceAllocationLimitAddress() {
    return old_data_space_->allocation_limit_address();
  }

  // Returns a deep copy of the JavaScript object.
  // Properties and elements are copied too.
  // Optionally takes an AllocationSite to be appended in an AllocationMemento.
  MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source,
                                                AllocationSite* site = NULL);

  // Clear the Instanceof cache (used when a prototype changes).
  inline void ClearInstanceofCache();

  // Iterates the whole code space to clear all ICs of the given kind.
  void ClearAllICsByKind(Code::Kind kind);

  // For use during bootup.
  void RepairFreeListsAfterBoot();

  template<typename T>
  static inline bool IsOneByte(T t, int chars);

  // Moves len elements within a given array from src_index to dst_index.
  void MoveElements(FixedArray* array, int dst_index, int src_index, int len);

  // Sloppy mode arguments object size.
  static const int kSloppyArgumentsObjectSize =
      JSObject::kHeaderSize + 2 * kPointerSize;
  // A strict mode arguments object has no callee, so it is smaller.
  static const int kStrictArgumentsObjectSize =
      JSObject::kHeaderSize + 1 * kPointerSize;
  // Indices for direct access into argument objects.
  static const int kArgumentsLengthIndex = 0;
  // callee is only valid in sloppy mode.
  static const int kArgumentsCalleeIndex = 1;

  // Finalizes an external string by deleting the associated external
  // data and clearing the resource pointer.
  inline void FinalizeExternalString(String* string);

  // Initialize a filler object to keep the ability to iterate over the heap
  // when shortening objects.
  void CreateFillerObjectAt(Address addr, int size);

  bool CanMoveObjectStart(HeapObject* object);

  enum InvocationMode { FROM_GC, FROM_MUTATOR };

  // Maintain marking consistency for IncrementalMarking.
  void AdjustLiveBytes(Address address, int by, InvocationMode mode);

  // Converts the given boolean condition to a JavaScript boolean value.
  inline Object* ToBoolean(bool condition);

  // Performs a garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  inline bool CollectGarbage(
      AllocationSpace space,
      const char* gc_reason = NULL,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  static const int kNoGCFlags = 0;
  static const int kSweepPreciselyMask = 1;
  static const int kReduceMemoryFootprintMask = 2;
  static const int kAbortIncrementalMarkingMask = 4;

  // Making the heap iterable requires us to sweep precisely and abort any
  // incremental marking as well.
  static const int kMakeHeapIterableMask =
      kSweepPreciselyMask | kAbortIncrementalMarkingMask;

  // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
  // non-zero, then the slower precise sweeper is used, which leaves the heap
  // in a state where we can iterate over the heap visiting all objects.
  void CollectAllGarbage(
      int flags,
      const char* gc_reason = NULL,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Last hope GC, should try to squeeze as much as possible.
  void CollectAllAvailableGarbage(const char* gc_reason = NULL);
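
  // For example (sketch), a caller that needs to walk the heap afterwards
  // combines the masks declared above to force a precise sweep and abort
  // incremental marking:
  //
  //   heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "heap iteration");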

  // Check whether the heap is currently iterable.
  bool IsHeapIterable();

  // Notify the heap that a context has been disposed.
  int NotifyContextDisposed();

  inline void increment_scan_on_scavenge_pages() {
    scan_on_scavenge_pages_++;
    if (FLAG_gc_verbose) {
      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
    }
  }

  inline void decrement_scan_on_scavenge_pages() {
    scan_on_scavenge_pages_--;
    if (FLAG_gc_verbose) {
      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
    }
  }

  PromotionQueue* promotion_queue() { return &promotion_queue_; }

  void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
                             GCType gc_type_filter,
                             bool pass_isolate = true);
  void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback);

  void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
                             GCType gc_type_filter,
                             bool pass_isolate = true);
  void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback);

  // Heap root getters. We have versions with and without type::cast() here.
  // You can't use type::cast during GC because the assert fails.
  // TODO(1490): Try removing the unchecked accessors, now that GC marking does
  // not corrupt the map.
#define ROOT_ACCESSOR(type, name, camel_name) \
  type* name() { \
    return type::cast(roots_[k##camel_name##RootIndex]); \
  } \
  type* raw_unchecked_##name() { \
    return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
  }
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

  // Utility type maps
#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
  Map* name##_map() { \
    return Map::cast(roots_[k##Name##MapRootIndex]); \
  }
  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR

#define STRING_ACCESSOR(name, str) String* name() { \
    return String::cast(roots_[k##name##RootIndex]); \
  }
  INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR

  // The hidden_string is special because it is the empty string, but does
  // not match the empty string.
  String* hidden_string() { return hidden_string_; }

  void set_native_contexts_list(Object* object) {
    native_contexts_list_ = object;
  }
  Object* native_contexts_list() const { return native_contexts_list_; }

  void set_array_buffers_list(Object* object) {
    array_buffers_list_ = object;
  }
  Object* array_buffers_list() const { return array_buffers_list_; }

  void set_allocation_sites_list(Object* object) {
    allocation_sites_list_ = object;
  }
  Object* allocation_sites_list() { return allocation_sites_list_; }

  // Used in CreateAllocationSiteStub and the (de)serializer.
  Object** allocation_sites_list_address() { return &allocation_sites_list_; }

  Object* weak_object_to_code_table() { return weak_object_to_code_table_; }

  void set_encountered_weak_collections(Object* weak_collection) {
    encountered_weak_collections_ = weak_collection;
  }
  Object* encountered_weak_collections() const {
    return encountered_weak_collections_;
  }

  // Number of mark-sweeps.
  unsigned int ms_count() { return ms_count_; }

  // Iterates over all roots in the heap.
  void IterateRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over all strong roots in the heap.
  void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over entries in the smi roots list. Only interesting to the
  // serializer/deserializer, since GC does not care about smis.
  void IterateSmiRoots(ObjectVisitor* v);
  // Iterates over all the other roots in the heap.
  void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);

  // Iterates over pointers to the from-semispace of new space found in the
  // memory interval from start to end.
  void IterateAndMarkPointersToFromSpace(Address start,
                                         Address end,
                                         ObjectSlotCallback callback);

  // Returns whether the object resides in new space.
  inline bool InNewSpace(Object* object);
  inline bool InNewSpace(Address address);
  inline bool InNewSpacePage(Address address);
  inline bool InFromSpace(Object* object);
  inline bool InToSpace(Object* object);

  // Returns whether the object resides in old pointer space.
  inline bool InOldPointerSpace(Address address);
  inline bool InOldPointerSpace(Object* object);

  // Returns whether the object resides in old data space.
  inline bool InOldDataSpace(Address address);
  inline bool InOldDataSpace(Object* object);

  // Checks whether an address/object is in the heap (including auxiliary
  // area and unused area).
  bool Contains(Address addr);
  bool Contains(HeapObject* value);

  // Checks whether an address/object is in a space.
  // Currently used by tests, serialization and heap verification only.
  bool InSpace(Address addr, AllocationSpace space);
  bool InSpace(HeapObject* value, AllocationSpace space);

  // Finds out which space an object should get promoted to based on its type.
  inline OldSpace* TargetSpace(HeapObject* object);
  static inline AllocationSpace TargetSpaceId(InstanceType type);

  // Checks whether the given object is allowed to be migrated from its
  // current space into the given destination space. Used for debugging.
  inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);

  // Sets the stub_cache_ (only used when expanding the dictionary).
  void public_set_code_stubs(UnseededNumberDictionary* value) {
    roots_[kCodeStubsRootIndex] = value;
  }

  // Support for computing object sizes for old objects during GCs. Returns
  // a function that is guaranteed to be safe for computing object sizes in
  // the current GC phase.
  HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
    return gc_safe_size_of_old_object_;
  }

  // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
  void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) {
    roots_[kNonMonomorphicCacheRootIndex] = value;
  }

  void public_set_empty_script(Script* script) {
    roots_[kEmptyScriptRootIndex] = script;
  }

  void public_set_store_buffer_top(Address* top) {
    roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
  }

  void public_set_materialized_objects(FixedArray* objects) {
    roots_[kMaterializedObjectsRootIndex] = objects;
  }

  // Generated code can embed this address to get access to the roots.
  Object** roots_array_start() { return roots_; }

  Address* store_buffer_top_address() {
    return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
  }

#ifdef VERIFY_HEAP
  // Verify the heap is in its normal state before or after a GC.
  void Verify();

  bool weak_embedded_objects_verification_enabled() {
    return no_weak_object_verification_scope_depth_ == 0;
  }
#endif

#ifdef DEBUG
  void Print();
  void PrintHandles();

  void OldPointerSpaceCheckStoreBuffer();
  void MapSpaceCheckStoreBuffer();
  void LargeObjectSpaceCheckStoreBuffer();

  // Report heap statistics.
  void ReportHeapStatistics(const char* title);
  void ReportCodeStatistics(const char* title);
#endif

  // Zapping is needed for verify heap, and always done in debug builds.
  static inline bool ShouldZapGarbage() {
#ifdef DEBUG
    return true;
#else
#ifdef VERIFY_HEAP
    return FLAG_verify_heap;
#else
    return false;
#endif
#endif
  }

  // Number of "runtime allocations" done so far.
  uint32_t allocations_count() { return allocations_count_; }

  // Returns deterministic "time" value in ms. Works only with
  // FLAG_verify_predictable.
  double synthetic_time() { return allocations_count_ / 100.0; }

  // Print short heap statistics.
  void PrintShortHeapStatistics();

  // Write barrier support for address[offset] = o.
  INLINE(void RecordWrite(Address address, int offset));

  // Write barrier support for address[start : start + len[ = o.
  INLINE(void RecordWrites(Address address, int start, int len));

  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
  inline HeapState gc_state() { return gc_state_; }

  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }

#ifdef DEBUG
  void set_allocation_timeout(int timeout) {
    allocation_timeout_ = timeout;
  }

  void TracePathToObjectFrom(Object* target, Object* root);
  void TracePathToObject(Object* target);
  void TracePathToGlobal();
#endif

  // Callback function passed to Heap::Iterate etc. Copies an object if
  // necessary; the object might be promoted to an old space. The caller must
  // ensure the precondition that the object is (a) a heap object and (b) in
  // the heap's from space.
  static inline void ScavengePointer(HeapObject** p);
  static inline void ScavengeObject(HeapObject** p, HeapObject* object);

  enum ScratchpadSlotMode {
    IGNORE_SCRATCHPAD_SLOT,
    RECORD_SCRATCHPAD_SLOT
  };

  // If an object has an AllocationMemento trailing it, return it; otherwise
  // return NULL.
  inline AllocationMemento* FindAllocationMemento(HeapObject* object);

  // An object may have an AllocationSite associated with it through a trailing
  // AllocationMemento. Its feedback should be updated when objects are found
  // in the heap.
  static inline void UpdateAllocationSiteFeedback(
      HeapObject* object, ScratchpadSlotMode mode);

  // Support for partial snapshots. After calling this we have a linear
  // space to write objects in each space.
  void ReserveSpace(int* sizes, Address* addresses);

  //
  // Support for the API.
  //

  void CreateApiObjects();

  inline intptr_t PromotedTotalSize() {
    int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
    if (total > kMaxInt) return static_cast<intptr_t>(kMaxInt);
    if (total < 0) return 0;
    return static_cast<intptr_t>(total);
  }

  inline intptr_t OldGenerationSpaceAvailable() {
    return old_generation_allocation_limit_ - PromotedTotalSize();
  }

  inline intptr_t OldGenerationCapacityAvailable() {
    return max_old_generation_size_ - PromotedTotalSize();
  }

  static const intptr_t kMinimumOldGenerationAllocationLimit =
      8 * (Page::kPageSize > MB ? Page::kPageSize : MB);

  static const int kPointerMultiplier = i::kPointerSize / 4;

  // The new space size has to be a power of 2. Sizes are in MB.
  static const int kMaxSemiSpaceSizeLowMemoryDevice =
      1 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeMediumMemoryDevice =
      4 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHighMemoryDevice =
      8 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHugeMemoryDevice =
      8 * kPointerMultiplier;
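
  // Worked example: kPointerMultiplier is i::kPointerSize / 4, i.e. 1 on
  // 32-bit builds and 2 on 64-bit builds, so kMaxSemiSpaceSizeHighMemoryDevice
  // above is 8 MB on a 32-bit build and 16 MB on a 64-bit build.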

  // The old space size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxOldSpaceSizeLowMemoryDevice =
      128 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeMediumMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHighMemoryDevice =
      512 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHugeMemoryDevice =
      700 * kPointerMultiplier;

  // The executable size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
  static const int kMaxExecutableSizeMediumMemoryDevice =
      192 * kPointerMultiplier;
  static const int kMaxExecutableSizeHighMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxExecutableSizeHugeMemoryDevice =
      256 * kPointerMultiplier;

  intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size,
                                        int freed_global_handles);

  // Indicates whether inline bump-pointer allocation has been disabled.
  bool inline_allocation_disabled() { return inline_allocation_disabled_; }

  // Switch whether inline bump-pointer allocation should be used.
  void EnableInlineAllocation();
  void DisableInlineAllocation();

  // Implements the corresponding V8 API function.
  bool IdleNotification(int hint);

  // Declare all the root indices. This defines the root list order.
  enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION

#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
    INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
#undef STRING_INDEX_DECLARATION

  // Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
    STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP

    kStringTableRootIndex,

#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION

    kRootListLength,
    kStrongRootListLength = kStringTableRootIndex,
    kSmiRootsStart = kStringTableRootIndex + 1
  };

  STATIC_ASSERT(kUndefinedValueRootIndex ==
                Internals::kUndefinedValueRootIndex);
  STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
  STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
  STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
  STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);

  // Generated code can embed direct references to non-writable roots if
  // they are in new space.
  static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
  // Generated code can treat direct references to this root as constant.
  bool RootCanBeTreatedAsConstant(RootListIndex root_index);

  Map* MapForFixedTypedArray(ExternalArrayType array_type);
  RootListIndex RootIndexForFixedTypedArray(
      ExternalArrayType array_type);

  Map* MapForExternalArrayType(ExternalArrayType array_type);
  RootListIndex RootIndexForExternalArrayType(
      ExternalArrayType array_type);

  RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind);
  RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
  ExternalArray* EmptyExternalArrayForMap(Map* map);
  FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);

  void RecordStats(HeapStats* stats, bool take_snapshot = false);

  // Copy block of memory from src to dst. Size of block should be aligned
  // by pointer size.
  static inline void CopyBlock(Address dst, Address src, int byte_size);

  // Optimized version of memmove for blocks with pointer size aligned sizes
  // and pointer size aligned addresses.
  static inline void MoveBlock(Address dst, Address src, int byte_size);

  // Check new space expansion criteria and expand semispaces if it was hit.
  void CheckNewSpaceExpansionCriteria();

  inline void IncrementPromotedObjectsSize(int object_size) {
    DCHECK(object_size > 0);
    promoted_objects_size_ += object_size;
  }

  inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
    DCHECK(object_size > 0);
    semi_space_copied_object_size_ += object_size;
  }

  inline void IncrementNodesDiedInNewSpace() {
    nodes_died_in_new_space_++;
  }

  inline void IncrementNodesCopiedInNewSpace() {
    nodes_copied_in_new_space_++;
  }

  inline void IncrementNodesPromoted() {
    nodes_promoted_++;
  }

  inline void IncrementYoungSurvivorsCounter(int survived) {
    DCHECK(survived >= 0);
    survived_since_last_expansion_ += survived;
  }

  inline bool NextGCIsLikelyToBeFull() {
    if (FLAG_gc_global) return true;

    if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;

    intptr_t adjusted_allocation_limit =
        old_generation_allocation_limit_ - new_space_.Capacity();

    if (PromotedTotalSize() >= adjusted_allocation_limit) return true;

    return false;
  }

  void UpdateNewSpaceReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void UpdateReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void ProcessWeakReferences(WeakObjectRetainer* retainer);

  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);

  // An object should be promoted if the object has survived a
  // scavenge operation.
  inline bool ShouldBePromoted(Address old_address, int object_size);

  void ClearJSFunctionResultCaches();

  void ClearNormalizedMapCaches();

  GCTracer* tracer() { return &tracer_; }

  // Returns the size of objects residing in non-new spaces.
  intptr_t PromotedSpaceSizeOfObjects();

  double total_regexp_code_generated() { return total_regexp_code_generated_; }
  void IncreaseTotalRegexpCodeGenerated(int size) {
    total_regexp_code_generated_ += size;
  }

  void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) {
    if (is_crankshafted) {
      crankshaft_codegen_bytes_generated_ += size;
    } else {
      full_codegen_bytes_generated_ += size;
    }
  }

  // Update GC statistics that are tracked on the Heap.
  void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator,
                                    double marking_time);

  // Returns maximum GC pause.
  double get_max_gc_pause() { return max_gc_pause_; }

  // Returns maximum size of objects alive after GC.
  intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }

  // Returns minimal interval between two subsequent collections.
  double get_min_in_mutator() { return min_in_mutator_; }

  MarkCompactCollector* mark_compact_collector() {
    return &mark_compact_collector_;
  }

  StoreBuffer* store_buffer() {
    return &store_buffer_;
  }

  Marking* marking() {
    return &marking_;
  }

  IncrementalMarking* incremental_marking() {
    return &incremental_marking_;
  }

  ExternalStringTable* external_string_table() {
    return &external_string_table_;
  }

  // Returns the current sweep generation.
  int sweep_generation() {
    return sweep_generation_;
  }

  inline Isolate* isolate();

  void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
  void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);

  inline bool OldGenerationAllocationLimitReached();

  inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
    scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
  }

  void QueueMemoryChunkForFree(MemoryChunk* chunk);
  void FreeQueuedChunks();

  int gc_count() const { return gc_count_; }

  // Completely clear the Instanceof cache (to stop it keeping objects alive
  // around a GC).
  inline void CompletelyClearInstanceofCache();

  // The roots that have an index less than this are always in old space.
  static const int kOldSpaceRoots = 0x20;

  uint32_t HashSeed() {
    uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
    DCHECK(FLAG_randomize_hashes || seed == 0);
    return seed;
  }

  void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
    DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
    set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
  }

  void SetConstructStubDeoptPCOffset(int pc_offset) {
    DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
    set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
  }

  void SetGetterStubDeoptPCOffset(int pc_offset) {
    DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
    set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
  }

  void SetSetterStubDeoptPCOffset(int pc_offset) {
    DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
    set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
  }

  // For post-mortem debugging.
  void RememberUnmappedPage(Address page, bool compacted);

  // Global inline caching age: it is incremented on some GCs after context
  // disposal. We use it to flush inline caches.
  int global_ic_age() {
    return global_ic_age_;
  }

  void AgeInlineCaches() {
    global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
  }

  bool flush_monomorphic_ics() { return flush_monomorphic_ics_; }

  int64_t amount_of_external_allocated_memory() {
    return amount_of_external_allocated_memory_;
  }

  void DeoptMarkedAllocationSites();

  bool MaximumSizeScavenge() {
    return maximum_size_scavenges_ > 0;
  }

  bool DeoptMaybeTenuredAllocationSites() {
    return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
  }

  // ObjectStats are kept in two arrays, counts and sizes. Related stats are
  // stored in a contiguous linear buffer. Stats groups are stored one after
  // another.
  enum {
    FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
    FIRST_FIXED_ARRAY_SUB_TYPE =
        FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
    FIRST_CODE_AGE_SUB_TYPE =
        FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
    OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1
  };
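
  // Index layout example (sketch): a Code object of kind k with age a is
  // counted in two buckets, exactly as RecordCodeSubTypeStats below computes:
  //
  //   object_counts_[FIRST_CODE_KIND_SUB_TYPE + k]                        // kind
  //   object_counts_[FIRST_CODE_AGE_SUB_TYPE + (a - Code::kFirstCodeAge)] // age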
1363 | |
1364 void RecordObjectStats(InstanceType type, size_t size) { | |
1365 DCHECK(type <= LAST_TYPE); | |
1366 object_counts_[type]++; | |
1367 object_sizes_[type] += size; | |
1368 } | |
1369 | |
1370 void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) { | |
1371 int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type; | |
1372 int code_age_index = | |
1373 FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge; | |
1374 DCHECK(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE && | |
1375 code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE); | |
1376 DCHECK(code_age_index >= FIRST_CODE_AGE_SUB_TYPE && | |
1377 code_age_index < OBJECT_STATS_COUNT); | |
1378 object_counts_[code_sub_type_index]++; | |
1379 object_sizes_[code_sub_type_index] += size; | |
1380 object_counts_[code_age_index]++; | |
1381 object_sizes_[code_age_index] += size; | |
1382 } | |
1383 | |
1384 void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) { | |
1385 DCHECK(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE); | |
1386 object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++; | |
1387 object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size; | |
1388 } | |
1389 | |
1390 void CheckpointObjectStats(); | |
1391 | |
1392 // We don't use a LockGuard here since we want to lock the heap | |
1393 // only when FLAG_concurrent_recompilation is true. | |
1394 class RelocationLock { | |
1395 public: | |
1396 explicit RelocationLock(Heap* heap) : heap_(heap) { | |
1397 heap_->relocation_mutex_.Lock(); | |
1398 } | |
1399 | |
1401 ~RelocationLock() { | |
1402 heap_->relocation_mutex_.Unlock(); | |
1403 } | |
1404 | |
1405 private: | |
1406 Heap* heap_; | |
1407 }; | |
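// Usage sketch for RelocationLock (illustrative; assumes a Heap* named heap):
//   {
//     Heap::RelocationLock relocation_lock(heap);
//     // ... read object addresses without racing concurrent relocation ...
//   }  // RAII: the mutex is released when the scope exits.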
1408 | |
1409 void AddWeakObjectToCodeDependency(Handle<Object> obj, | |
1410 Handle<DependentCode> dep); | |
1411 | |
1412 DependentCode* LookupWeakObjectToCodeDependency(Handle<Object> obj); | |
1413 | |
1414 void InitializeWeakObjectToCodeTable() { | |
1415 set_weak_object_to_code_table(undefined_value()); | |
1416 } | |
1417 | |
1418 void EnsureWeakObjectToCodeTable(); | |
1419 | |
1420 static void FatalProcessOutOfMemory(const char* location, | |
1421 bool take_snapshot = false); | |
1422 | |
1423 // This event is triggered after successful allocation of a new object made | |
1424 // by the runtime. Allocations of target space for object evacuation do not | |
1425 // trigger the event. In order to track ALL allocations, one must turn off | |
1426 // FLAG_inline_new and FLAG_use_allocation_folding. | |
1427 inline void OnAllocationEvent(HeapObject* object, int size_in_bytes); | |
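// For example (illustrative): running with --noinline_new and
// --nouse_allocation_folding routes every allocation through the runtime,
// so each one raises this event.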
1428 | |
1429 // This event is triggered after an object is moved to a new place. | |
1430 inline void OnMoveEvent(HeapObject* target, | |
1431 HeapObject* source, | |
1432 int size_in_bytes); | |
1433 | |
1434 protected: | |
1435 // Methods made available to tests. | |
1436 | |
1437 // Allocates a JS Map in the heap. | |
1438 MUST_USE_RESULT AllocationResult AllocateMap( | |
1439 InstanceType instance_type, | |
1440 int instance_size, | |
1441 ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND); | |
1442 | |
1443 // Allocates and initializes a new JavaScript object based on a | |
1444 // constructor. | |
1445 // If allocation_site is non-null, then a memento is emitted after the object | |
1446 // that points to the site. | |
1447 MUST_USE_RESULT AllocationResult AllocateJSObject( | |
1448 JSFunction* constructor, | |
1449 PretenureFlag pretenure = NOT_TENURED, | |
1450 AllocationSite* allocation_site = NULL); | |
1451 | |
1452 // Allocates and initializes a new JavaScript object based on a map. | |
1453 // Passing an allocation site means that a memento will be created that | |
1454 // points to the site. | |
1455 MUST_USE_RESULT AllocationResult AllocateJSObjectFromMap( | |
1456 Map* map, | |
1457 PretenureFlag pretenure = NOT_TENURED, | |
1458 bool alloc_props = true, | |
1459 AllocationSite* allocation_site = NULL); | |
1460 | |
1461 // Allocates a HeapNumber from value. | |
1462 MUST_USE_RESULT AllocationResult AllocateHeapNumber( | |
1463 double value, | |
1464 MutableMode mode = IMMUTABLE, | |
1465 PretenureFlag pretenure = NOT_TENURED); | |
1466 | |
1467 // Allocates a byte array of the specified length. | |
1468 MUST_USE_RESULT AllocationResult AllocateByteArray( | |
1469 int length, | |
1470 PretenureFlag pretenure = NOT_TENURED); | |
1471 | |
1472 // Copy the code and scope info part of the code object, but insert | |
1473 // the provided data as the relocation information. | |
1474 MUST_USE_RESULT AllocationResult CopyCode(Code* code, | |
1475 Vector<byte> reloc_info); | |
1476 | |
1477 MUST_USE_RESULT AllocationResult CopyCode(Code* code); | |
1478 | |
1479 // Allocates a fixed array initialized with undefined values | |
1480 MUST_USE_RESULT AllocationResult AllocateFixedArray( | |
1481 int length, | |
1482 PretenureFlag pretenure = NOT_TENURED); | |
1483 | |
1484 private: | |
1485 Heap(); | |
1486 | |
1487 // The amount of external memory registered through the API kept alive | |
1488 // by global handles | |
1489 int64_t amount_of_external_allocated_memory_; | |
1490 | |
1491 // Caches the amount of external memory registered at the last global GC. | |
1492 int64_t amount_of_external_allocated_memory_at_last_global_gc_; | |
1493 | |
1494 // This can be calculated directly from a pointer to the heap; however, it is | |
1495 // more expedient to get at the isolate directly from within Heap methods. | |
1496 Isolate* isolate_; | |
1497 | |
1498 Object* roots_[kRootListLength]; | |
1499 | |
1500 size_t code_range_size_; | |
1501 int reserved_semispace_size_; | |
1502 int max_semi_space_size_; | |
1503 int initial_semispace_size_; | |
1504 intptr_t max_old_generation_size_; | |
1505 intptr_t max_executable_size_; | |
1506 intptr_t maximum_committed_; | |
1507 | |
1508 // For keeping track of how much data has survived | |
1509 // scavenge since the last new space expansion. | |
1510 int survived_since_last_expansion_; | |
1511 | |
1512 // For keeping track of when to flush RegExp code. | |
1513 int sweep_generation_; | |
1514 | |
1515 int always_allocate_scope_depth_; | |
1516 | |
1517 // For keeping track of context disposals. | |
1518 int contexts_disposed_; | |
1519 | |
1520 int global_ic_age_; | |
1521 | |
1522 bool flush_monomorphic_ics_; | |
1523 | |
1524 int scan_on_scavenge_pages_; | |
1525 | |
1526 NewSpace new_space_; | |
1527 OldSpace* old_pointer_space_; | |
1528 OldSpace* old_data_space_; | |
1529 OldSpace* code_space_; | |
1530 MapSpace* map_space_; | |
1531 CellSpace* cell_space_; | |
1532 PropertyCellSpace* property_cell_space_; | |
1533 LargeObjectSpace* lo_space_; | |
1534 HeapState gc_state_; | |
1535 int gc_post_processing_depth_; | |
1536 Address new_space_top_after_last_gc_; | |
1537 | |
1538 // Returns the amount of external memory registered since the last global GC. | |
1539 int64_t PromotedExternalMemorySize(); | |
1540 | |
1541 // How many "runtime allocations" happened. | |
1542 uint32_t allocations_count_; | |
1543 | |
1544 // Running hash over allocations performed. | |
1545 uint32_t raw_allocations_hash_; | |
1546 | |
1547 // Countdown counter, dumps allocation hash when 0. | |
1548 uint32_t dump_allocations_hash_countdown_; | |
1549 | |
1550 // How many mark-sweep collections happened. | |
1551 unsigned int ms_count_; | |
1552 | |
1553 // How many GCs happened. | |
1554 unsigned int gc_count_; | |
1555 | |
1556 // For post mortem debugging. | |
1557 static const int kRememberedUnmappedPages = 128; | |
1558 int remembered_unmapped_pages_index_; | |
1559 Address remembered_unmapped_pages_[kRememberedUnmappedPages]; | |
1560 | |
1561 // Total length of the strings we failed to flatten since the last GC. | |
1562 int unflattened_strings_length_; | |
1563 | |
1564 #define ROOT_ACCESSOR(type, name, camel_name) \ | |
1565 inline void set_##name(type* value) { \ | |
1566 /* The deserializer makes use of the fact that these common roots are */ \ | |
1567 /* never in new space and never on a page that is being compacted. */ \ | |
1568 DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \ | |
1569 roots_[k##camel_name##RootIndex] = value; \ | |
1570 } | |
1571 ROOT_LIST(ROOT_ACCESSOR) | |
1572 #undef ROOT_ACCESSOR | |
1573 | |
1574 #ifdef DEBUG | |
1575 // If the --gc-interval flag is set to a positive value, this | |
1576 // variable holds the number of allocations remaining until the next | |
1577 // induced failure and garbage collection. | |
1578 int allocation_timeout_; | |
1579 #endif // DEBUG | |
1580 | |
1581 // Limit that triggers a global GC on the next (normally caused) GC. This | |
1582 // is checked when we have already decided to do a GC to help determine | |
1583 // which collector to invoke, before expanding a paged space in the old | |
1584 // generation and on every allocation in large object space. | |
1585 intptr_t old_generation_allocation_limit_; | |
1586 | |
1587 // Indicates that an allocation has failed in the old generation since the | |
1588 // last GC. | |
1589 bool old_gen_exhausted_; | |
1590 | |
1591 // Indicates that inline bump-pointer allocation has been globally disabled | |
1592 // for all spaces. This is used to disable allocations in generated code. | |
1593 bool inline_allocation_disabled_; | |
1594 | |
1595 // Weak list heads, threaded through the objects. | |
1596 // List heads are initialized lazily and contain the undefined_value at start. | |
1597 Object* native_contexts_list_; | |
1598 Object* array_buffers_list_; | |
1599 Object* allocation_sites_list_; | |
1600 | |
1601 // WeakHashTable that maps objects embedded in optimized code to dependent | |
1602 // code list. It is initialized lazily and contains the undefined_value at | |
1603 // start. | |
1604 Object* weak_object_to_code_table_; | |
1605 | |
1606 // List of encountered weak collections (JSWeakMap and JSWeakSet) during | |
1607 // marking. It is initialized during marking, destroyed after marking and | |
1608 // contains Smi(0) while marking is not active. | |
1609 Object* encountered_weak_collections_; | |
1610 | |
1611 StoreBufferRebuilder store_buffer_rebuilder_; | |
1612 | |
1613 struct StringTypeTable { | |
1614 InstanceType type; | |
1615 int size; | |
1616 RootListIndex index; | |
1617 }; | |
1618 | |
1619 struct ConstantStringTable { | |
1620 const char* contents; | |
1621 RootListIndex index; | |
1622 }; | |
1623 | |
1624 struct StructTable { | |
1625 InstanceType type; | |
1626 int size; | |
1627 RootListIndex index; | |
1628 }; | |
1629 | |
1630 static const StringTypeTable string_type_table[]; | |
1631 static const ConstantStringTable constant_string_table[]; | |
1632 static const StructTable struct_table[]; | |
1633 | |
1634 // The special hidden string which is an empty string, but does not match | |
1635 // any string when looked up in properties. | |
1636 String* hidden_string_; | |
1637 | |
1638 // GC callback function, called before and after mark-compact GC. | |
1639 // Allocations in the callback function are disallowed. | |
1640 struct GCPrologueCallbackPair { | |
1641 GCPrologueCallbackPair(v8::Isolate::GCPrologueCallback callback, | |
1642 GCType gc_type, | |
1643 bool pass_isolate) | |
1644 : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) { | |
1645 } | |
1646 bool operator==(const GCPrologueCallbackPair& pair) const { | |
1647 return pair.callback == callback; | |
1648 } | |
1649 v8::Isolate::GCPrologueCallback callback; | |
1650 GCType gc_type; | |
1651 // TODO(dcarney): remove variable | |
1652 bool pass_isolate_; | |
1653 }; | |
1654 List<GCPrologueCallbackPair> gc_prologue_callbacks_; | |
1655 | |
1656 struct GCEpilogueCallbackPair { | |
1657 GCEpilogueCallbackPair(v8::Isolate::GCPrologueCallback callback, | |
1658 GCType gc_type, | |
1659 bool pass_isolate) | |
1660 : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) { | |
1661 } | |
1662 bool operator==(const GCEpilogueCallbackPair& pair) const { | |
1663 return pair.callback == callback; | |
1664 } | |
1665 v8::Isolate::GCPrologueCallback callback; | |
1666 GCType gc_type; | |
1667 // TODO(dcarney): remove variable | |
1668 bool pass_isolate_; | |
1669 }; | |
1670 List<GCEpilogueCallbackPair> gc_epilogue_callbacks_; | |
1671 | |
1672 // Support for computing object sizes during GC. | |
1673 HeapObjectCallback gc_safe_size_of_old_object_; | |
1674 static int GcSafeSizeOfOldObject(HeapObject* object); | |
1675 | |
1676 // Update the GC state. Called from the mark-compact collector. | |
1677 void MarkMapPointersAsEncoded(bool encoded) { | |
1678 DCHECK(!encoded); | |
1679 gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject; | |
1680 } | |
1681 | |
1682 // Code that should be run before and after each GC. Includes some | |
1683 // reporting/verification activities when compiled with DEBUG set. | |
1684 void GarbageCollectionPrologue(); | |
1685 void GarbageCollectionEpilogue(); | |
1686 | |
1687 // Pretenuring decisions are made based on feedback collected during new | |
1688 // space evacuation. Note that between feedback collection and calling this | |
1689 // method, objects in old space must not move. | |
1690 // Right now we only process pretenuring feedback in high promotion mode. | |
1691 void ProcessPretenuringFeedback(); | |
1692 | |
1693 // Checks whether a global GC is necessary. | |
1694 GarbageCollector SelectGarbageCollector(AllocationSpace space, | |
1695 const char** reason); | |
1696 | |
1697 // Make sure there is a filler value behind the top of the new space | |
1698 // so that the GC does not confuse some uninitialized/stale memory | |
1699 // with the allocation memento of the object at the top. | |
1700 void EnsureFillerObjectAtTop(); | |
1701 | |
1702 // Ensure that we have swept all spaces in such a way that we can iterate | |
1703 // over all objects. May cause a GC. | |
1704 void MakeHeapIterable(); | |
1705 | |
1706 // Performs a garbage collection operation. | |
1707 // Returns whether there is a chance that another major GC could | |
1708 // collect more garbage. | |
1709 bool CollectGarbage( | |
1710 GarbageCollector collector, | |
1711 const char* gc_reason, | |
1712 const char* collector_reason, | |
1713 const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); | |
1714 | |
1715 // Performs garbage collection | |
1716 // Returns whether there is a chance another major GC could | |
1717 // collect more garbage. | |
1718 bool PerformGarbageCollection( | |
1719 GarbageCollector collector, | |
1720 const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); | |
1721 | |
1722 inline void UpdateOldSpaceLimits(); | |
1723 | |
1724 // Selects the proper allocation space depending on the given object | |
1725 // size, pretenuring decision, and preferred old-space. | |
1726 static AllocationSpace SelectSpace(int object_size, | |
1727 AllocationSpace preferred_old_space, | |
1728 PretenureFlag pretenure) { | |
1729 DCHECK(preferred_old_space == OLD_POINTER_SPACE || | |
1730 preferred_old_space == OLD_DATA_SPACE); | |
1731 if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE; | |
1732 return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE; | |
1733 } | |
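// Worked example (illustrative): a small TENURED request with
// preferred_old_space == OLD_DATA_SPACE yields OLD_DATA_SPACE, the same
// request NOT_TENURED yields NEW_SPACE, and any request larger than
// Page::kMaxRegularHeapObjectSize yields LO_SPACE regardless of pretenuring.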
1734 | |
1735 // Allocate an uninitialized object. The memory is non-executable if the | |
1736 // hardware and OS allow. This is the single choke-point for allocations | |
1737 // performed by the runtime and should not be bypassed (to extend this to | |
1738 // inlined allocations, use the Heap::DisableInlineAllocation() support). | |
1739 MUST_USE_RESULT inline AllocationResult AllocateRaw( | |
1740 int size_in_bytes, | |
1741 AllocationSpace space, | |
1742 AllocationSpace retry_space); | |
1743 | |
1744 // Allocates a heap object based on the map. | |
1745 MUST_USE_RESULT AllocationResult Allocate( | |
1746 Map* map, | |
1747 AllocationSpace space, | |
1748 AllocationSite* allocation_site = NULL); | |
1749 | |
1750 // Allocates a partial map for bootstrapping. | |
1751 MUST_USE_RESULT AllocationResult AllocatePartialMap( | |
1752 InstanceType instance_type, | |
1753 int instance_size); | |
1754 | |
1755 // Initializes a JSObject based on its map. | |
1756 void InitializeJSObjectFromMap(JSObject* obj, | |
1757 FixedArray* properties, | |
1758 Map* map); | |
1759 void InitializeAllocationMemento(AllocationMemento* memento, | |
1760 AllocationSite* allocation_site); | |
1761 | |
1762 // Allocate a block of memory in the given space (filled with a filler). | |
1763 // Used as a fall-back for generated code when the space is full. | |
1764 MUST_USE_RESULT AllocationResult AllocateFillerObject(int size, | |
1765 bool double_align, | |
1766 AllocationSpace space); | |
1767 | |
1768 // Allocate an uninitialized fixed array. | |
1769 MUST_USE_RESULT AllocationResult AllocateRawFixedArray( | |
1770 int length, PretenureFlag pretenure); | |
1771 | |
1772 // Allocate an uninitialized fixed double array. | |
1773 MUST_USE_RESULT AllocationResult AllocateRawFixedDoubleArray( | |
1774 int length, PretenureFlag pretenure); | |
1775 | |
1776 // Allocate an initialized fixed array with the given filler value. | |
1777 MUST_USE_RESULT AllocationResult AllocateFixedArrayWithFiller( | |
1778 int length, PretenureFlag pretenure, Object* filler); | |
1779 | |
1780 // Allocates and partially initializes a String. There are two String | |
1781 // encodings: ASCII and two-byte. These functions allocate a string of the | |
1782 // given length and set its map and length fields. The characters of the | |
1783 // string are uninitialized. | |
1784 MUST_USE_RESULT AllocationResult AllocateRawOneByteString( | |
1785 int length, PretenureFlag pretenure); | |
1786 MUST_USE_RESULT AllocationResult AllocateRawTwoByteString( | |
1787 int length, PretenureFlag pretenure); | |
1788 | |
1789 bool CreateInitialMaps(); | |
1790 void CreateInitialObjects(); | |
1791 | |
1792 // Allocates an internalized string in old space based on the character | |
1793 // stream. | |
1794 MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8( | |
1795 Vector<const char> str, | |
1796 int chars, | |
1797 uint32_t hash_field); | |
1798 | |
1799 MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString( | |
1800 Vector<const uint8_t> str, | |
1801 uint32_t hash_field); | |
1802 | |
1803 MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString( | |
1804 Vector<const uc16> str, | |
1805 uint32_t hash_field); | |
1806 | |
1807 template<bool is_one_byte, typename T> | |
1808 MUST_USE_RESULT AllocationResult AllocateInternalizedStringImpl( | |
1809 T t, int chars, uint32_t hash_field); | |
1810 | |
1811 template<typename T> | |
1812 MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl( | |
1813 T t, int chars, uint32_t hash_field); | |
1814 | |
1815 // Allocates an uninitialized fixed array. It must be filled by the caller. | |
1816 MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length); | |
1817 | |
1818 // Make a copy of src and return it. Returns | |
1819 // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. | |
1820 MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src); | |
1821 | |
1822 // Make a copy of src, set the map, and return the copy. Returns | |
1823 // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. | |
1824 MUST_USE_RESULT AllocationResult CopyFixedArrayWithMap(FixedArray* src, | |
1825 Map* map); | |
1826 | |
1827 // Make a copy of src and return it. Returns | |
1828 // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. | |
1829 MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray( | |
1830 FixedDoubleArray* src); | |
1831 | |
1832 // Make a copy of src and return it. Returns | |
1833 // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. | |
1834 MUST_USE_RESULT inline AllocationResult CopyConstantPoolArray( | |
1835 ConstantPoolArray* src); | |
1836 | |
1838 // Computes a single character string where the character has the given code. | |
1839 // A cache is used for ASCII codes. | |
1840 MUST_USE_RESULT AllocationResult LookupSingleCharacterStringFromCode( | |
1841 uint16_t code); | |
1842 | |
1843 // Allocate a symbol in old space. | |
1844 MUST_USE_RESULT AllocationResult AllocateSymbol(); | |
1845 | |
1846 // Make a copy of src, set the map, and return the copy. | |
1847 MUST_USE_RESULT AllocationResult CopyConstantPoolArrayWithMap( | |
1848 ConstantPoolArray* src, Map* map); | |
1849 | |
1850 MUST_USE_RESULT AllocationResult AllocateConstantPoolArray( | |
1851 const ConstantPoolArray::NumberOfEntries& small); | |
1852 | |
1853 MUST_USE_RESULT AllocationResult AllocateExtendedConstantPoolArray( | |
1854 const ConstantPoolArray::NumberOfEntries& small, | |
1855 const ConstantPoolArray::NumberOfEntries& extended); | |
1856 | |
1857 // Allocates an external array of the specified length and type. | |
1858 MUST_USE_RESULT AllocationResult AllocateExternalArray( | |
1859 int length, | |
1860 ExternalArrayType array_type, | |
1861 void* external_pointer, | |
1862 PretenureFlag pretenure); | |
1863 | |
1864 // Allocates a fixed typed array of the specified length and type. | |
1865 MUST_USE_RESULT AllocationResult AllocateFixedTypedArray( | |
1866 int length, | |
1867 ExternalArrayType array_type, | |
1868 PretenureFlag pretenure); | |
1869 | |
1870 // Make a copy of src and return it. | |
1871 MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src); | |
1872 | |
1873 // Make a copy of src, set the map, and return the copy. | |
1874 MUST_USE_RESULT AllocationResult CopyFixedDoubleArrayWithMap( | |
1875 FixedDoubleArray* src, Map* map); | |
1876 | |
1877 // Allocates a fixed double array with uninitialized values. | |
1878 MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray( | |
1879 int length, | |
1880 PretenureFlag pretenure = NOT_TENURED); | |
1881 | |
1882 // These two Create*EntryStub functions are here and forced to not be inlined | |
1883 // because of a gcc-4.4 bug that assigns wrong vtable entries. | |
1884 NO_INLINE(void CreateJSEntryStub()); | |
1885 NO_INLINE(void CreateJSConstructEntryStub()); | |
1886 | |
1887 void CreateFixedStubs(); | |
1888 | |
1889 // Allocate empty fixed array. | |
1890 MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray(); | |
1891 | |
1892 // Allocate empty external array of given type. | |
1893 MUST_USE_RESULT AllocationResult AllocateEmptyExternalArray( | |
1894 ExternalArrayType array_type); | |
1895 | |
1896 // Allocate empty fixed typed array of given type. | |
1897 MUST_USE_RESULT AllocationResult AllocateEmptyFixedTypedArray( | |
1898 ExternalArrayType array_type); | |
1899 | |
1900 // Allocate empty constant pool array. | |
1901 MUST_USE_RESULT AllocationResult AllocateEmptyConstantPoolArray(); | |
1902 | |
1903 // Allocate a tenured simple cell. | |
1904 MUST_USE_RESULT AllocationResult AllocateCell(Object* value); | |
1905 | |
1906 // Allocate a tenured JS global property cell initialized with the hole. | |
1907 MUST_USE_RESULT AllocationResult AllocatePropertyCell(); | |
1908 | |
1909 // Allocates a new utility object in the old generation. | |
1910 MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type); | |
1911 | |
1912 // Allocates a new foreign object. | |
1913 MUST_USE_RESULT AllocationResult AllocateForeign( | |
1914 Address address, PretenureFlag pretenure = NOT_TENURED); | |
1915 | |
1916 MUST_USE_RESULT AllocationResult AllocateCode(int object_size, | |
1917 bool immovable); | |
1918 | |
1919 MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key); | |
1920 | |
1921 MUST_USE_RESULT AllocationResult InternalizeString(String* str); | |
1922 | |
1923 // Performs a minor collection in the new generation. | |
1924 void Scavenge(); | |
1925 | |
1926 // Commits from space if it is uncommitted. | |
1927 void EnsureFromSpaceIsCommitted(); | |
1928 | |
1929 // Uncommit unused semi space. | |
1930 bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); } | |
1931 | |
1932 // Fill in bogus values in from space | |
1933 void ZapFromSpace(); | |
1934 | |
1935 static String* UpdateNewSpaceReferenceInExternalStringTableEntry( | |
1936 Heap* heap, | |
1937 Object** pointer); | |
1938 | |
1939 Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front); | |
1940 static void ScavengeStoreBufferCallback(Heap* heap, | |
1941 MemoryChunk* page, | |
1942 StoreBufferEvent event); | |
1943 | |
1944 // Performs a major collection in the whole heap. | |
1945 void MarkCompact(); | |
1946 | |
1947 // Code to be run before and after mark-compact. | |
1948 void MarkCompactPrologue(); | |
1949 | |
1950 void ProcessNativeContexts(WeakObjectRetainer* retainer); | |
1951 void ProcessArrayBuffers(WeakObjectRetainer* retainer); | |
1952 void ProcessAllocationSites(WeakObjectRetainer* retainer); | |
1953 | |
1954 // Deopts all code that contains tenured or not tenured allocation | |
1955 // instructions. Moreover, it clears the pretenuring allocation site statistics. | |
1956 void ResetAllAllocationSitesDependentCode(PretenureFlag flag); | |
1957 | |
1958 // Evaluates local pretenuring for the old space and calls | |
1959 // ResetAllAllocationSitesDependentCode(TENURED) if too many objects died in | |
1960 // the old space. | |
1961 void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc); | |
1962 | |
1963 // Called on heap tear-down. | |
1964 void TearDownArrayBuffers(); | |
1965 | |
1966 // Record statistics before and after garbage collection. | |
1967 void ReportStatisticsBeforeGC(); | |
1968 void ReportStatisticsAfterGC(); | |
1969 | |
1970 // Slow part of scavenge object. | |
1971 static void ScavengeObjectSlow(HeapObject** p, HeapObject* object); | |
1972 | |
1973 // Total RegExp code ever generated | |
1974 double total_regexp_code_generated_; | |
1975 | |
1976 GCTracer tracer_; | |
1977 | |
1978 // Computes the length of the full-sized number string cache. | |
1979 int FullSizeNumberStringCacheLength(); | |
1980 // Flush the number-to-string cache. | |
1981 void FlushNumberStringCache(); | |
1982 | |
1983 // Sets used allocation site entries to undefined. | |
1984 void FlushAllocationSitesScratchpad(); | |
1985 | |
1986 // Initializes the allocation sites scratchpad with undefined values. | |
1987 void InitializeAllocationSitesScratchpad(); | |
1988 | |
1989 // Adds an allocation site to the scratchpad if there is space left. | |
1990 void AddAllocationSiteToScratchpad(AllocationSite* site, | |
1991 ScratchpadSlotMode mode); | |
1992 | |
1993 void UpdateSurvivalStatistics(int start_new_space_size); | |
1994 | |
1995 static const int kYoungSurvivalRateHighThreshold = 90; | |
1996 static const int kYoungSurvivalRateAllowedDeviation = 15; | |
1997 | |
1998 static const int kOldSurvivalRateLowThreshold = 10; | |
1999 | |
2000 int high_survival_rate_period_length_; | |
2001 intptr_t promoted_objects_size_; | |
2002 double promotion_rate_; | |
2003 intptr_t semi_space_copied_object_size_; | |
2004 double semi_space_copied_rate_; | |
2005 int nodes_died_in_new_space_; | |
2006 int nodes_copied_in_new_space_; | |
2007 int nodes_promoted_; | |
2008 | |
2009 // This is the pretenuring trigger for allocation sites that are in the | |
2010 // maybe-tenure state. When we switch to the maximum new space size we | |
2011 // deoptimize the code that belongs to the allocation site and derive the | |
2012 // lifetime of the allocation site. | |
2013 unsigned int maximum_size_scavenges_; | |
2014 | |
2015 // TODO(hpayer): Allocation site pretenuring may make this method obsolete. | |
2016 // Re-visit incremental marking heuristics. | |
2017 bool IsHighSurvivalRate() { | |
2018 return high_survival_rate_period_length_ > 0; | |
2019 } | |
2020 | |
2021 void SelectScavengingVisitorsTable(); | |
2022 | |
2023 void StartIdleRound() { | |
2024 mark_sweeps_since_idle_round_started_ = 0; | |
2025 } | |
2026 | |
2027 void FinishIdleRound() { | |
2028 mark_sweeps_since_idle_round_started_ = kMaxMarkSweepsInIdleRound; | |
2029 scavenges_since_last_idle_round_ = 0; | |
2030 } | |
2031 | |
2032 bool EnoughGarbageSinceLastIdleRound() { | |
2033 return (scavenges_since_last_idle_round_ >= kIdleScavengeThreshold); | |
2034 } | |
2035 | |
2036 // Estimates how many milliseconds a Mark-Sweep would take to complete. | |
2037 // In the idle notification handler we assume that this function will return: | |
2038 // - a number less than 10 for small heaps, which are less than 8MB. | |
2039 // - a number greater than 10 for large heaps, which are greater than 32MB. | |
2040 int TimeMarkSweepWouldTakeInMs() { | |
2041 // Rough estimate of how many megabytes of heap can be processed in 1 ms. | |
2042 static const int kMbPerMs = 2; | |
2043 | |
2044 int heap_size_mb = static_cast<int>(SizeOfObjects() / MB); | |
2045 return heap_size_mb / kMbPerMs; | |
2046 } | |
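// Worked example (illustrative): 8 MB of objects yields 8 / 2 = 4 ms and
// 64 MB yields 32 ms, matching the small/large heap assumptions above.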
2047 | |
2048 void AdvanceIdleIncrementalMarking(intptr_t step_size); | |
2049 | |
2050 void ClearObjectStats(bool clear_last_time_stats = false); | |
2051 | |
2052 void set_weak_object_to_code_table(Object* value) { | |
2053 DCHECK(!InNewSpace(value)); | |
2054 weak_object_to_code_table_ = value; | |
2055 } | |
2056 | |
2057 Object** weak_object_to_code_table_address() { | |
2058 return &weak_object_to_code_table_; | |
2059 } | |
2060 | |
2061 inline void UpdateAllocationsHash(HeapObject* object); | |
2062 inline void UpdateAllocationsHash(uint32_t value); | |
2063 inline void PrintAlloctionsHash(); | |
2064 | |
2065 static const int kInitialStringTableSize = 2048; | |
2066 static const int kInitialEvalCacheSize = 64; | |
2067 static const int kInitialNumberStringCacheSize = 256; | |
2068 | |
2069 // Object counts and used memory by InstanceType | |
2070 size_t object_counts_[OBJECT_STATS_COUNT]; | |
2071 size_t object_counts_last_time_[OBJECT_STATS_COUNT]; | |
2072 size_t object_sizes_[OBJECT_STATS_COUNT]; | |
2073 size_t object_sizes_last_time_[OBJECT_STATS_COUNT]; | |
2074 | |
2075 // Maximum GC pause. | |
2076 double max_gc_pause_; | |
2077 | |
2078 // Total time spent in GC. | |
2079 double total_gc_time_ms_; | |
2080 | |
2081 // Maximum size of objects alive after GC. | |
2082 intptr_t max_alive_after_gc_; | |
2083 | |
2084 // Minimal interval between two subsequent collections. | |
2085 double min_in_mutator_; | |
2086 | |
2087 // Cumulative GC time spent in marking | |
2088 double marking_time_; | |
2089 | |
2090 // Cumulative GC time spent in sweeping | |
2091 double sweeping_time_; | |
2092 | |
2093 MarkCompactCollector mark_compact_collector_; | |
2094 | |
2095 StoreBuffer store_buffer_; | |
2096 | |
2097 Marking marking_; | |
2098 | |
2099 IncrementalMarking incremental_marking_; | |
2100 | |
2101 int number_idle_notifications_; | |
2102 unsigned int last_idle_notification_gc_count_; | |
2103 bool last_idle_notification_gc_count_init_; | |
2104 | |
2105 int mark_sweeps_since_idle_round_started_; | |
2106 unsigned int gc_count_at_last_idle_gc_; | |
2107 int scavenges_since_last_idle_round_; | |
2108 | |
2109 // These two counters are monotonically increasing and never reset. | |
2110 size_t full_codegen_bytes_generated_; | |
2111 size_t crankshaft_codegen_bytes_generated_; | |
2112 | |
2113 // If the --deopt_every_n_garbage_collections flag is set to a positive value, | |
2114 // this variable holds the number of garbage collections since the last | |
2115 // deoptimization triggered by garbage collection. | |
2116 int gcs_since_last_deopt_; | |
2117 | |
2118 #ifdef VERIFY_HEAP | |
2119 int no_weak_object_verification_scope_depth_; | |
2120 #endif | |
2121 | |
2122 static const int kAllocationSiteScratchpadSize = 256; | |
2123 int allocation_sites_scratchpad_length_; | |
2124 | |
2125 static const int kMaxMarkSweepsInIdleRound = 7; | |
2126 static const int kIdleScavengeThreshold = 5; | |
2127 | |
2128 // Shared state read by the scavenge collector and set by ScavengeObject. | |
2129 PromotionQueue promotion_queue_; | |
2130 | |
2131 // Flag is set when the heap has been configured. The heap can be repeatedly | |
2132 // configured through the API until it is set up. | |
2133 bool configured_; | |
2134 | |
2135 ExternalStringTable external_string_table_; | |
2136 | |
2137 VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_; | |
2138 | |
2139 MemoryChunk* chunks_queued_for_free_; | |
2140 | |
2141 base::Mutex relocation_mutex_; | |
2142 | |
2143 int gc_callbacks_depth_; | |
2144 | |
2145 friend class AlwaysAllocateScope; | |
2146 friend class Factory; | |
2147 friend class GCCallbacksScope; | |
2148 friend class GCTracer; | |
2149 friend class HeapIterator; | |
2150 friend class Isolate; | |
2151 friend class MarkCompactCollector; | |
2152 friend class MarkCompactMarkingVisitor; | |
2153 friend class MapCompact; | |
2154 #ifdef VERIFY_HEAP | |
2155 friend class NoWeakObjectVerificationScope; | |
2156 #endif | |
2157 friend class Page; | |
2158 | |
2159 DISALLOW_COPY_AND_ASSIGN(Heap); | |
2160 }; | |
2161 | |
2162 | |
2163 class HeapStats { | |
2164 public: | |
2165 static const int kStartMarker = 0xDECADE00; | |
2166 static const int kEndMarker = 0xDECADE01; | |
2167 | |
2168 int* start_marker; // 0 | |
2169 int* new_space_size; // 1 | |
2170 int* new_space_capacity; // 2 | |
2171 intptr_t* old_pointer_space_size; // 3 | |
2172 intptr_t* old_pointer_space_capacity; // 4 | |
2173 intptr_t* old_data_space_size; // 5 | |
2174 intptr_t* old_data_space_capacity; // 6 | |
2175 intptr_t* code_space_size; // 7 | |
2176 intptr_t* code_space_capacity; // 8 | |
2177 intptr_t* map_space_size; // 9 | |
2178 intptr_t* map_space_capacity; // 10 | |
2179 intptr_t* cell_space_size; // 11 | |
2180 intptr_t* cell_space_capacity; // 12 | |
2181 intptr_t* lo_space_size; // 13 | |
2182 int* global_handle_count; // 14 | |
2183 int* weak_global_handle_count; // 15 | |
2184 int* pending_global_handle_count; // 16 | |
2185 int* near_death_global_handle_count; // 17 | |
2186 int* free_global_handle_count; // 18 | |
2187 intptr_t* memory_allocator_size; // 19 | |
2188 intptr_t* memory_allocator_capacity; // 20 | |
2189 int* objects_per_type; // 21 | |
2190 int* size_per_type; // 22 | |
2191 int* os_error; // 23 | |
2192 int* end_marker; // 24 | |
2193 intptr_t* property_cell_space_size; // 25 | |
2194 intptr_t* property_cell_space_capacity; // 26 | |
2195 }; | |
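// Consumer-side sketch (illustrative; LooksValid is hypothetical):
//   bool LooksValid(const HeapStats& stats) {
//     return *stats.start_marker == HeapStats::kStartMarker &&
//            *stats.end_marker == HeapStats::kEndMarker;
//   }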
2196 | |
2197 | |
2198 class AlwaysAllocateScope { | |
2199 public: | |
2200 explicit inline AlwaysAllocateScope(Isolate* isolate); | |
2201 inline ~AlwaysAllocateScope(); | |
2202 | |
2203 private: | |
2204 // Implicitly disable artificial allocation failures. | |
2205 Heap* heap_; | |
2206 DisallowAllocationFailure daf_; | |
2207 }; | |
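// Usage sketch (illustrative; assumes an Isolate* named isolate):
//   {
//     AlwaysAllocateScope always_allocate(isolate);
//     // Allocations here may grow spaces instead of failing or forcing GC.
//   }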
2208 | |
2209 | |
2210 #ifdef VERIFY_HEAP | |
2211 class NoWeakObjectVerificationScope { | |
2212 public: | |
2213 inline NoWeakObjectVerificationScope(); | |
2214 inline ~NoWeakObjectVerificationScope(); | |
2215 }; | |
2216 #endif | |
2217 | |
2218 | |
2219 class GCCallbacksScope { | |
2220 public: | |
2221 explicit inline GCCallbacksScope(Heap* heap); | |
2222 inline ~GCCallbacksScope(); | |
2223 | |
2224 inline bool CheckReenter(); | |
2225 | |
2226 private: | |
2227 Heap* heap_; | |
2228 }; | |
2229 | |
2230 | |
2231 // Visitor class to verify interior pointers in spaces that do not contain | |
2232 // or care about intergenerational references. All heap object pointers have to | |
2233 // point into the heap to a location that has a map pointer at its first word. | |
2234 // Caveat: Heap::Contains is an approximation because it can return true for | |
2235 // objects in a heap space but above the allocation pointer. | |
2236 class VerifyPointersVisitor: public ObjectVisitor { | |
2237 public: | |
2238 inline void VisitPointers(Object** start, Object** end); | |
2239 }; | |
2240 | |
2241 | |
2242 // Verify that all objects are Smis. | |
2243 class VerifySmisVisitor: public ObjectVisitor { | |
2244 public: | |
2245 inline void VisitPointers(Object** start, Object** end); | |
2246 }; | |
2247 | |
2248 | |
2249 // Space iterator for iterating over all spaces of the heap. Returns each space | |
2250 // in turn, and null when it is done. | |
2251 class AllSpaces BASE_EMBEDDED { | |
2252 public: | |
2253 explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {} | |
2254 Space* next(); | |
2255 private: | |
2256 Heap* heap_; | |
2257 int counter_; | |
2258 }; | |
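// Usage sketch (illustrative; assumes a Heap* named heap):
//   AllSpaces spaces(heap);
//   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
//     // ... inspect each space in turn ...
//   }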
2259 | |
2260 | |
2261 // Space iterator for iterating over all old spaces of the heap: Old pointer | |
2262 // space, old data space and code space. Returns each space in turn, and null | |
2263 // when it is done. | |
2264 class OldSpaces BASE_EMBEDDED { | |
2265 public: | |
2266 explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {} | |
2267 OldSpace* next(); | |
2268 private: | |
2269 Heap* heap_; | |
2270 int counter_; | |
2271 }; | |
2272 | |
2273 | |
2274 // Space iterator for iterating over all the paged spaces of the heap: Map | |
2275 // space, old pointer space, old data space, code space and cell space. Returns | |
2276 // each space in turn, and null when it is done. | |
2277 class PagedSpaces BASE_EMBEDDED { | |
2278 public: | |
2279 explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {} | |
2280 PagedSpace* next(); | |
2281 private: | |
2282 Heap* heap_; | |
2283 int counter_; | |
2284 }; | |
2285 | |
2286 | |
2287 // Space iterator for iterating over all spaces of the heap. | |
2288 // For each space an object iterator is provided. The deallocation of the | |
2289 // returned object iterators is handled by the space iterator. | |
2290 class SpaceIterator : public Malloced { | |
2291 public: | |
2292 explicit SpaceIterator(Heap* heap); | |
2293 SpaceIterator(Heap* heap, HeapObjectCallback size_func); | |
2294 virtual ~SpaceIterator(); | |
2295 | |
2296 bool has_next(); | |
2297 ObjectIterator* next(); | |
2298 | |
2299 private: | |
2300 ObjectIterator* CreateIterator(); | |
2301 | |
2302 Heap* heap_; | |
2303 int current_space_; // from enum AllocationSpace. | |
2304 ObjectIterator* iterator_; // object iterator for the current space. | |
2305 HeapObjectCallback size_func_; | |
2306 }; | |
2307 | |
2308 | |
2309 // A HeapIterator provides iteration over the whole heap. It | |
2310 // aggregates the specific iterators for the different spaces as | |
2311 // each of these can iterate over only one space. | |
2312 // | |
2313 // HeapIterator ensures there is no allocation during its lifetime | |
2314 // (using an embedded DisallowHeapAllocation instance). | |
2315 // | |
2316 // HeapIterator can skip free list nodes (that is, de-allocated heap | |
2317 // objects that still remain in the heap). As the implementation of | |
2318 // free-node filtering uses GC marks, it can't be used during MS/MC GC | |
2319 // phases. Also, it is forbidden to interrupt iteration in this mode, | |
2320 // as this will leave heap objects marked (and thus, unusable). | |
2321 class HeapObjectsFilter; | |
2322 | |
2323 class HeapIterator BASE_EMBEDDED { | |
2324 public: | |
2325 enum HeapObjectsFiltering { | |
2326 kNoFiltering, | |
2327 kFilterUnreachable | |
2328 }; | |
2329 | |
2330 explicit HeapIterator(Heap* heap); | |
2331 HeapIterator(Heap* heap, HeapObjectsFiltering filtering); | |
2332 ~HeapIterator(); | |
2333 | |
2334 HeapObject* next(); | |
2335 void reset(); | |
2336 | |
2337 private: | |
2338 struct MakeHeapIterableHelper { | |
2339 explicit MakeHeapIterableHelper(Heap* heap) { heap->MakeHeapIterable(); } | |
2340 }; | |
2341 | |
2342 // Perform the initialization. | |
2343 void Init(); | |
2344 // Perform all necessary shutdown (destruction) work. | |
2345 void Shutdown(); | |
2346 HeapObject* NextObject(); | |
2347 | |
2348 MakeHeapIterableHelper make_heap_iterable_helper_; | |
2349 DisallowHeapAllocation no_heap_allocation_; | |
2350 Heap* heap_; | |
2351 HeapObjectsFiltering filtering_; | |
2352 HeapObjectsFilter* filter_; | |
2353 // Space iterator for iterating all the spaces. | |
2354 SpaceIterator* space_iterator_; | |
2355 // Object iterator for the space currently being iterated. | |
2356 ObjectIterator* object_iterator_; | |
2357 }; | |
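// Usage sketch (illustrative; assumes a Heap* named heap):
//   HeapIterator iterator(heap);
//   for (HeapObject* obj = iterator.next(); obj != NULL;
//        obj = iterator.next()) {
//     // ... visit every live object across all spaces ...
//   }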
2358 | |
2359 | |
2360 // Cache for mapping (map, property name) into field offset. | |
2361 // Cleared at startup and prior to mark sweep collection. | |
2362 class KeyedLookupCache { | |
2363 public: | |
2364 // Lookup field offset for (map, name). If absent, -1 is returned. | |
2365 int Lookup(Handle<Map> map, Handle<Name> name); | |
2366 | |
2367 // Update an element in the cache. | |
2368 void Update(Handle<Map> map, Handle<Name> name, int field_offset); | |
2369 | |
2370 // Clear the cache. | |
2371 void Clear(); | |
2372 | |
2373 static const int kLength = 256; | |
2374 static const int kCapacityMask = kLength - 1; | |
2375 static const int kMapHashShift = 5; | |
2376 static const int kHashMask = -4; // Zero the last two bits. | |
2377 static const int kEntriesPerBucket = 4; | |
2378 static const int kEntryLength = 2; | |
2379 static const int kMapIndex = 0; | |
2380 static const int kKeyIndex = 1; | |
2381 static const int kNotFound = -1; | |
2382 | |
2383 // kEntriesPerBucket should be a power of 2. | |
2384 STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0); | |
2385 STATIC_ASSERT(kEntriesPerBucket == -kHashMask); | |
2386 | |
2387 private: | |
2388 KeyedLookupCache() { | |
2389 for (int i = 0; i < kLength; ++i) { | |
2390 keys_[i].map = NULL; | |
2391 keys_[i].name = NULL; | |
2392 field_offsets_[i] = kNotFound; | |
2393 } | |
2394 } | |
2395 | |
2396 static inline int Hash(Handle<Map> map, Handle<Name> name); | |
2397 | |
2398 // Get the address of the keys and field_offsets arrays. Used in | |
2399 // generated code to perform cache lookups. | |
2400 Address keys_address() { | |
2401 return reinterpret_cast<Address>(&keys_); | |
2402 } | |
2403 | |
2404 Address field_offsets_address() { | |
2405 return reinterpret_cast<Address>(&field_offsets_); | |
2406 } | |
2407 | |
2408 struct Key { | |
2409 Map* map; | |
2410 Name* name; | |
2411 }; | |
2412 | |
2413 Key keys_[kLength]; | |
2414 int field_offsets_[kLength]; | |
2415 | |
2416 friend class ExternalReference; | |
2417 friend class Isolate; | |
2418 DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache); | |
2419 }; | |
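// Usage sketch (illustrative; reaching the cache via
// isolate->keyed_lookup_cache() and the map/name handles are assumptions):
//   KeyedLookupCache* cache = isolate->keyed_lookup_cache();
//   int offset = cache->Lookup(map, name);
//   if (offset == KeyedLookupCache::kNotFound) {
//     // ... do the slow lookup, then prime the cache ...
//     cache->Update(map, name, offset);
//   }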
2420 | |
2421 | |
2422 // Cache for mapping (map, property name) into descriptor index. | |
2423 // The cache contains both positive and negative results. | |
2424 // A descriptor index equal to kNotFound means the property is absent. | |
2425 // Cleared at startup and prior to any gc. | |
2426 class DescriptorLookupCache { | |
2427 public: | |
2428 // Lookup descriptor index for (map, name). | |
2429 // If absent, kAbsent is returned. | |
2430 int Lookup(Map* source, Name* name) { | |
2431 if (!name->IsUniqueName()) return kAbsent; | |
2432 int index = Hash(source, name); | |
2433 Key& key = keys_[index]; | |
2434 if ((key.source == source) && (key.name == name)) return results_[index]; | |
2435 return kAbsent; | |
2436 } | |
2437 | |
2438 // Update an element in the cache. | |
2439 void Update(Map* source, Name* name, int result) { | |
2440 DCHECK(result != kAbsent); | |
2441 if (name->IsUniqueName()) { | |
2442 int index = Hash(source, name); | |
2443 Key& key = keys_[index]; | |
2444 key.source = source; | |
2445 key.name = name; | |
2446 results_[index] = result; | |
2447 } | |
2448 } | |
2449 | |
2450 // Clear the cache. | |
2451 void Clear(); | |
2452 | |
2453 static const int kAbsent = -2; | |
2454 | |
2455 private: | |
2456 DescriptorLookupCache() { | |
2457 for (int i = 0; i < kLength; ++i) { | |
2458 keys_[i].source = NULL; | |
2459 keys_[i].name = NULL; | |
2460 results_[i] = kAbsent; | |
2461 } | |
2462 } | |
2463 | |
2464 static int Hash(Object* source, Name* name) { | |
2465 // Uses only the lower 32 bits if pointers are larger. | |
2466 uint32_t source_hash = | |
2467 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) | |
2468 >> kPointerSizeLog2; | |
2469 uint32_t name_hash = | |
2470 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) | |
2471 >> kPointerSizeLog2; | |
2472 return (source_hash ^ name_hash) % kLength; | |
2473 } | |
2474 | |
2475 static const int kLength = 64; | |
2476 struct Key { | |
2477 Map* source; | |
2478 Name* name; | |
2479 }; | |
2480 | |
2481 Key keys_[kLength]; | |
2482 int results_[kLength]; | |
2483 | |
2484 friend class Isolate; | |
2485 DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache); | |
2486 }; | |
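// Worked example of Hash() (illustrative): on a 64-bit build
// (kPointerSizeLog2 == 3), pointers 0x1040 and 0x2080 give
// ((0x1040 >> 3) ^ (0x2080 >> 3)) % 64 == (0x208 ^ 0x410) % 64 == 24.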
2487 | |
2488 | |
2489 class RegExpResultsCache { | |
2490 public: | |
2491 enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS }; | |
2492 | |
2493 // Attempt to retrieve a cached result. On failure, 0 is returned as a Smi. | |
2494 // On success, the returned result is guaranteed to be a COW-array. | |
2495 static Object* Lookup(Heap* heap, | |
2496 String* key_string, | |
2497 Object* key_pattern, | |
2498 ResultsCacheType type); | |
2499 // Attempt to add value_array to the cache specified by type. On success, | |
2500 // value_array is turned into a COW-array. | |
2501 static void Enter(Isolate* isolate, | |
2502 Handle<String> key_string, | |
2503 Handle<Object> key_pattern, | |
2504 Handle<FixedArray> value_array, | |
2505 ResultsCacheType type); | |
2506 static void Clear(FixedArray* cache); | |
2507 static const int kRegExpResultsCacheSize = 0x100; | |
2508 | |
2509 private: | |
2510 static const int kArrayEntriesPerCacheEntry = 4; | |
2511 static const int kStringOffset = 0; | |
2512 static const int kPatternOffset = 1; | |
2513 static const int kArrayOffset = 2; | |
2514 }; | |
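// Usage sketch (illustrative; subject and pattern are assumed in scope):
//   Object* cached = RegExpResultsCache::Lookup(
//       heap, subject, pattern, RegExpResultsCache::STRING_SPLIT_SUBSTRINGS);
//   if (cached->IsFixedArray()) {
//     // Cache hit: a COW FixedArray holding the split results.
//   }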
2515 | |
2516 | |
2517 // Abstract base class for checking whether a weak object should be retained. | |
2518 class WeakObjectRetainer { | |
2519 public: | |
2520 virtual ~WeakObjectRetainer() {} | |
2521 | |
2522 // Return whether this object should be retained. If NULL is returned the | |
2523 // object has no references. Otherwise the address of the retained object | |
2524 // should be returned, since in some GC situations the object may have moved. | |
2525 virtual Object* RetainAs(Object* object) = 0; | |
2526 }; | |
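// Minimal retainer sketch (illustrative; the class name is hypothetical):
//   class RetainEverything : public WeakObjectRetainer {
//    public:
//     virtual Object* RetainAs(Object* object) {
//       return object;  // Keep the object, at its current address.
//     }
//   };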
2527 | |
2528 | |
2529 // Intrusive object marking uses the least significant bit of a | |
2530 // heap object's map word to mark objects. | |
2531 // Normally all map words have the least significant bit set | |
2532 // because they contain a tagged map pointer. | |
2533 // If the bit is not set, the object is marked. | |
2534 // All objects should be unmarked before resuming | |
2535 // JavaScript execution. | |
2536 class IntrusiveMarking { | |
2537 public: | |
2538 static bool IsMarked(HeapObject* object) { | |
2539 return (object->map_word().ToRawValue() & kNotMarkedBit) == 0; | |
2540 } | |
2541 | |
2542 static void ClearMark(HeapObject* object) { | |
2543 uintptr_t map_word = object->map_word().ToRawValue(); | |
2544 object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit)); | |
2545 DCHECK(!IsMarked(object)); | |
2546 } | |
2547 | |
2548 static void SetMark(HeapObject* object) { | |
2549 uintptr_t map_word = object->map_word().ToRawValue(); | |
2550 object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit)); | |
2551 DCHECK(IsMarked(object)); | |
2552 } | |
2553 | |
2554 static Map* MapOfMarkedObject(HeapObject* object) { | |
2555 uintptr_t map_word = object->map_word().ToRawValue(); | |
2556 return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap(); | |
2557 } | |
2558 | |
2559 static int SizeOfMarkedObject(HeapObject* object) { | |
2560 return object->SizeFromMap(MapOfMarkedObject(object)); | |
2561 } | |
2562 | |
2563 private: | |
2564 static const uintptr_t kNotMarkedBit = 0x1; | |
2565 STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0); // NOLINT | |
2566 }; | |
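// Worked example (illustrative): a map word of 0x12345671 has the low bit
// set, so the object is unmarked. SetMark clears the bit (0x12345670) and
// IsMarked then returns true; MapOfMarkedObject re-ORs the bit before
// decoding, so the original map pointer is recovered intact.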
2567 | |
2568 | |
2569 #ifdef DEBUG | |
2570 // Helper class for tracing paths to a search target Object from all roots. | |
2571 // The TracePathFrom() method can be used to trace paths from a specific | |
2572 // object to the search target object. | |
2573 class PathTracer : public ObjectVisitor { | |
2574 public: | |
2575 enum WhatToFind { | |
2576 FIND_ALL, // Will find all matches. | |
2577 FIND_FIRST // Will stop the search after first match. | |
2578 }; | |
2579 | |
2580 // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject. | |
2581 static const int kMarkTag = 2; | |
2582 | |
2583 // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop | |
2584 // after the first match. If FIND_ALL is specified, then tracing will be | |
2585 // done for all matches. | |
2586 PathTracer(Object* search_target, | |
2587 WhatToFind what_to_find, | |
2588 VisitMode visit_mode) | |
2589 : search_target_(search_target), | |
2590 found_target_(false), | |
2591 found_target_in_trace_(false), | |
2592 what_to_find_(what_to_find), | |
2593 visit_mode_(visit_mode), | |
2594 object_stack_(20), | |
2595 no_allocation() {} | |
2596 | |
2597 virtual void VisitPointers(Object** start, Object** end); | |
2598 | |
2599 void Reset(); | |
2600 void TracePathFrom(Object** root); | |
2601 | |
2602 bool found() const { return found_target_; } | |
2603 | |
2604 static Object* const kAnyGlobalObject; | |
2605 | |
2606 protected: | |
2607 class MarkVisitor; | |
2608 class UnmarkVisitor; | |
2609 | |
2610 void MarkRecursively(Object** p, MarkVisitor* mark_visitor); | |
2611 void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor); | |
2612 virtual void ProcessResults(); | |
2613 | |
2614 Object* search_target_; | |
2615 bool found_target_; | |
2616 bool found_target_in_trace_; | |
2617 WhatToFind what_to_find_; | |
2618 VisitMode visit_mode_; | |
2619 List<Object*> object_stack_; | |
2620 | |
2621 DisallowHeapAllocation no_allocation; // i.e. no gc allowed. | |
2622 | |
2623 private: | |
2624 DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer); | |
2625 }; | |
2626 #endif // DEBUG | |
2627 | |
2628 } } // namespace v8::internal | |
2629 | |
2630 #endif // V8_HEAP_H_ | |