Chromium Code Reviews

Side by Side Diff: src/heap.h

Issue 435003: Patch for allowing several V8 instances in process:... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 11 years ago
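
At its core the patch applies one mechanical pattern throughout heap.h: every formerly static piece of Heap state moves into a per-instance HeapData object reached through v8_context(), while the static Heap facade keeps its old signatures. A minimal sketch of that pattern, assuming the v8_context() accessor used throughout the diff (all other names here are simplified for illustration):

    // Sketch only: v8_context() is assumed to return the context of the
    // currently active V8 instance; member names are illustrative.
    class HeapData {
      int max_semispace_size_;   // was: static int Heap::max_semispace_size_
      friend class Heap;         // the static facade keeps exclusive access
    };

    struct V8Context {
      HeapData heap_data_;       // one copy of the heap state per instance
    };

    V8Context* v8_context();     // assumed per-instance context accessor

    class Heap {
     public:
      static int MaxSemiSpaceSize() {   // signature unchanged for callers
        return v8_context()->heap_data_.max_semispace_size_;
      }
    };
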
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. 1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 172 matching lines...)
183 V(exec_symbol, "exec") \ 183 V(exec_symbol, "exec") \
184 V(zero_symbol, "0") \ 184 V(zero_symbol, "0") \
185 V(global_eval_symbol, "GlobalEval") \ 185 V(global_eval_symbol, "GlobalEval") \
186 V(identity_hash_symbol, "v8::IdentityHash") \ 186 V(identity_hash_symbol, "v8::IdentityHash") \
187 V(closure_symbol, "(closure)") 187 V(closure_symbol, "(closure)")
188 188
189 189
190 // Forward declaration of the GCTracer class. 190 // Forward declaration of the GCTracer class.
191 class GCTracer; 191 class GCTracer;
192 class HeapStats; 192 class HeapStats;
193 class HeapPrivateData;
193 194
195 class HeapDataConstants {
196 public:
197 enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
198
199 // Declare all the root indices.
200 enum RootListIndex {
201 #define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
202 STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
203 #undef ROOT_INDEX_DECLARATION
204
205 // Utility type maps
206 #define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
207 STRUCT_LIST(DECLARE_STRUCT_MAP)
208 #undef DECLARE_STRUCT_MAP
209
210 #define SYMBOL_INDEX_DECLARATION(name, str) k##name##RootIndex,
211 SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
212 #undef SYMBOL_INDEX_DECLARATION
213
214 kSymbolTableRootIndex,
215 kStrongRootListLength = kSymbolTableRootIndex,
216 kRootListLength
217 };
218 };
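
Once the X-macro lists are expanded, the generated enum has roughly the following shape (the concrete entry names assume typical STRONG_ROOT_LIST, STRUCT_LIST and SYMBOL_LIST contents and are illustrative):

    // Rough shape of RootListIndex after expansion; entries shown assume
    // list items such as V(Map, meta_map, MetaMap) and are illustrative.
    enum RootListIndex {
      kMetaMapRootIndex,            // from STRONG_ROOT_LIST
      // ... one k<CamelName>RootIndex per strong root ...
      kAccessorInfoMapRootIndex,    // from STRUCT_LIST (utility type maps)
      // ... one k<Name>MapRootIndex per struct ...
      kArray_symbolRootIndex,       // from SYMBOL_LIST
      // ... one k<name>RootIndex per symbol ...
      kSymbolTableRootIndex,
      kStrongRootListLength = kSymbolTableRootIndex,
      kRootListLength
    };
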
219
220 class HeapData: public HeapDataConstants {
221 int reserved_semispace_size_;
222 int max_semispace_size_;
223 int initial_semispace_size_;
224 int max_old_generation_size_;
225 size_t code_range_size_;
226
227 // Flag is set when the heap has been configured. The heap can be repeatedly
228 // configured through the API until it is set up.
229 bool heap_configured_;
230
231 // For keeping track of how much data has survived
232 // scavenge since last new space expansion.
233 int survived_since_last_expansion_;
234
235 int always_allocate_scope_depth_;
236 int linear_allocation_scope_depth_;
237 bool context_disposed_pending_;
238
239 NewSpace new_space_;
240 OldSpace* old_pointer_space_;
241 OldSpace* old_data_space_;
242 OldSpace* code_space_;
243 MapSpace* map_space_;
244 CellSpace* cell_space_;
245 LargeObjectSpace* lo_space_;
246
247 HeapState gc_state_;
248
249 int mc_count_; // how many mark-compact collections have happened
250 int gc_count_; // how many GCs have happened
251
252 #ifdef DEBUG
253 bool allocation_allowed_;
254
255 // If the --gc-interval flag is set to a positive value, this
256 // variable holds the number of allocations remaining until the
257 // next failure and garbage collection.
258 int allocation_timeout_;
259
260 // Do we expect to be able to handle allocation failure at this
261 // time?
262 bool disallow_allocation_failure_;
263 #endif // DEBUG
264
265 // Limit that triggers a global GC on the next (normally caused) GC. This
266 // is checked when we have already decided to do a GC to help determine
267 // which collector to invoke.
268 int old_gen_promotion_limit_;
269
270 // Limit that triggers a global GC as soon as is reasonable. This is
271 // checked before expanding a paged space in the old generation and on
272 // every allocation in large object space.
273 int old_gen_allocation_limit_;
274
275 // Limit on the amount of externally allocated memory allowed
276 // between global GCs. If reached, a global GC is forced.
277 int external_allocation_limit_;
278
279 // The amount of external memory registered through the API kept alive
280 // by global handles.
281 int amount_of_external_allocated_memory_;
282
283 // Caches the amount of external memory registered at the last global gc.
284 int amount_of_external_allocated_memory_at_last_global_gc_;
285
286 // Indicates that an allocation has failed in the old generation since the
287 // last GC.
288 int old_gen_exhausted_;
289
290 Object* roots_[kRootListLength];
291
292 // The special hidden symbol which is an empty string, but does not match
293 // any string when looked up in properties.
294 String* hidden_symbol_;
295
296 // GC callback function, called before and after mark-compact GC.
297 // Allocations in the callback function are disallowed.
298 GCCallback global_gc_prologue_callback_;
299 GCCallback global_gc_epilogue_callback_;
300
301 HeapPrivateData& heap_private_data_;
302
303 HeapData();
304 ~HeapData();
305
306 friend class Factory;
307 friend class DisallowAllocationFailure;
308 friend class AlwaysAllocateScope;
309 friend class LinearAllocationScope;
310 friend class Heap;
311 friend class V8Context;
312
313 DISALLOW_COPY_AND_ASSIGN(HeapData);
314 };
194 315
195 // The all-static Heap captures the interface to the global object heap. 316 // The all-static Heap captures the interface to the global object heap.
196 // All JavaScript contexts in this process share the same object heap. 317 // All JavaScript contexts in this process share the same object heap.
197 318
198 class Heap : public AllStatic { 319 class Heap : public AllStatic, public HeapDataConstants {
199 public: 320 public:
200 // Configure heap size before setup. Return false if the heap has been 321 // Configure heap size before setup. Return false if the heap has been
201 // set up already. 322 // set up already.
202 static bool ConfigureHeap(int max_semispace_size, int max_old_gen_size); 323 static bool ConfigureHeap(int max_semispace_size, int max_old_gen_size);
203 static bool ConfigureHeapDefault(); 324 static bool ConfigureHeapDefault();
204 325
205 // Initializes the global object heap. If create_heap_objects is true, 326 // Initializes the global object heap. If create_heap_objects is true,
206 // also creates the basic non-mutable objects. 327 // also creates the basic non-mutable objects.
207 // Returns whether it succeeded. 328 // Returns whether it succeeded.
208 static bool Setup(bool create_heap_objects); 329 static bool Setup(bool create_heap_objects);
209 330
210 // Destroys all memory allocated by the heap. 331 // Destroys all memory allocated by the heap.
211 static void TearDown(); 332 static void TearDown();
212 333
213 // Set the stack limit in the roots_ array. Some architectures generate 334 // Set the stack limit in the roots_ array. Some architectures generate
214 // code that looks here, because it is faster than loading from the static 335 // code that looks here, because it is faster than loading from the static
215 // jslimit_/real_jslimit_ variable in the StackGuard. 336 // jslimit_/real_jslimit_ variable in the StackGuard.
216 static void SetStackLimits(); 337 static void SetStackLimits();
217 338
218 // Returns whether Setup has been called. 339 // Returns whether Setup has been called.
219 static bool HasBeenSetup(); 340 static bool HasBeenSetup();
220 341
221 // Returns the maximum amount of memory reserved for the heap. For 342 // Returns the maximum amount of memory reserved for the heap. For
222 // the young generation, we reserve 4 times the amount needed for a 343 // the young generation, we reserve 4 times the amount needed for a
223 // semi space. The young generation consists of two semi spaces and 344 // semi space. The young generation consists of two semi spaces and
224 // we reserve twice the amount needed for those in order to ensure 345 // we reserve twice the amount needed for those in order to ensure
225 // that new space can be aligned to its size. 346 // that new space can be aligned to its size.
226 static int MaxReserved() { 347 static int MaxReserved() {
227 return 4 * reserved_semispace_size_ + max_old_generation_size_; 348 return 4 * v8_context()->heap_data_.reserved_semispace_size_ +
349 v8_context()->heap_data_.max_old_generation_size_;
228 } 350 }
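
A quick worked example of the 4x reservation, with made-up sizes (not V8 defaults):

    // Illustrative arithmetic only:
    //   reserved_semispace_size_ = 8 MB, max_old_generation_size_ = 512 MB
    // New space needs two semispaces (16 MB); reserving twice that again
    // (32 MB) lets new space be placed aligned to its own size, so:
    //   MaxReserved() = 4 * 8 MB + 512 MB = 544 MB
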
229 static int MaxSemiSpaceSize() { return max_semispace_size_; } 351 static int MaxSemiSpaceSize() {
230 static int ReservedSemiSpaceSize() { return reserved_semispace_size_; } 352 return v8_context()->heap_data_.max_semispace_size_;
231 static int InitialSemiSpaceSize() { return initial_semispace_size_; } 353 }
232 static int MaxOldGenerationSize() { return max_old_generation_size_; } 354 static int ReservedSemiSpaceSize() {
355 return v8_context()->heap_data_.reserved_semispace_size_;
356 }
357 static int InitialSemiSpaceSize() {
358 return v8_context()->heap_data_.initial_semispace_size_;
359 }
360 static int MaxOldGenerationSize() {
361 return v8_context()->heap_data_.max_old_generation_size_;
362 }
233 363
234 // Returns the capacity of the heap in bytes w/o growing. Heap grows when 364 // Returns the capacity of the heap in bytes w/o growing. Heap grows when
235 // more spaces are needed until it reaches the limit. 365 // more spaces are needed until it reaches the limit.
236 static int Capacity(); 366 static int Capacity();
237 367
238 // Returns the amount of memory currently committed for the heap. 368 // Returns the amount of memory currently committed for the heap.
239 static int CommittedMemory(); 369 static int CommittedMemory();
240 370
241 // Returns the available bytes in space w/o growing. 371 // Returns the available bytes in space w/o growing.
242 // Heap doesn't guarantee that it can allocate an object that requires 372 // Heap doesn't guarantee that it can allocate an object that requires
243 // all available bytes. Check MaxHeapObjectSize() instead. 373 // all available bytes. Check MaxHeapObjectSize() instead.
244 static int Available(); 374 static int Available();
245 375
246 // Returns the maximum object size in paged space. 376 // Returns the maximum object size in paged space.
247 static inline int MaxObjectSizeInPagedSpace(); 377 static inline int MaxObjectSizeInPagedSpace();
248 378
249 // Returns the size of all objects residing in the heap. 379 // Returns the size of all objects residing in the heap.
250 static int SizeOfObjects(); 380 static int SizeOfObjects();
251 381
252 // Return the starting address and a mask for the new space. And-masking an 382 // Return the starting address and a mask for the new space. And-masking an
253 // address with the mask will result in the start address of the new space 383 // address with the mask will result in the start address of the new space
254 // for all addresses in either semispace. 384 // for all addresses in either semispace.
255 static Address NewSpaceStart() { return new_space_.start(); } 385 static Address NewSpaceStart() {
256 static uintptr_t NewSpaceMask() { return new_space_.mask(); } 386 return v8_context()->heap_data_.new_space_.start();
257 static Address NewSpaceTop() { return new_space_.top(); } 387 }
388 static uintptr_t NewSpaceMask() {
389 return v8_context()->heap_data_.new_space_.mask();
390 }
391 static Address NewSpaceTop() {
392 return v8_context()->heap_data_.new_space_.top();
393 }
258 394
259 static NewSpace* new_space() { return &new_space_; } 395 static NewSpace* new_space() {
260 static OldSpace* old_pointer_space() { return old_pointer_space_; } 396 return &v8_context()->heap_data_.new_space_;
261 static OldSpace* old_data_space() { return old_data_space_; } 397 }
262 static OldSpace* code_space() { return code_space_; } 398 static OldSpace* old_pointer_space() {
263 static MapSpace* map_space() { return map_space_; } 399 return v8_context()->heap_data_.old_pointer_space_;
264 static CellSpace* cell_space() { return cell_space_; } 400 }
265 static LargeObjectSpace* lo_space() { return lo_space_; } 401 static OldSpace* old_data_space() {
402 return v8_context()->heap_data_.old_data_space_;
403 }
404 static OldSpace* code_space() {
405 return v8_context()->heap_data_.code_space_;
406 }
407 static MapSpace* map_space() {
408 return v8_context()->heap_data_.map_space_;
409 }
410 static CellSpace* cell_space() {
411 return v8_context()->heap_data_.cell_space_;
412 }
413 static LargeObjectSpace* lo_space() {
414 return v8_context()->heap_data_.lo_space_;
415 }
266 416
267 static bool always_allocate() { return always_allocate_scope_depth_ != 0; } 417 static bool always_allocate() {
418 return v8_context()->heap_data_.always_allocate_scope_depth_ != 0;
419 }
268 static Address always_allocate_scope_depth_address() { 420 static Address always_allocate_scope_depth_address() {
269 return reinterpret_cast<Address>(&always_allocate_scope_depth_); 421 return reinterpret_cast<Address>(
422 &v8_context()->heap_data_.always_allocate_scope_depth_);
270 } 423 }
271 static bool linear_allocation() { 424 static bool linear_allocation() {
272 return linear_allocation_scope_depth_ != 0; 425 return v8_context()->heap_data_.linear_allocation_scope_depth_ != 0;
273 } 426 }
274 427
275 static Address* NewSpaceAllocationTopAddress() { 428 static Address* NewSpaceAllocationTopAddress() {
276 return new_space_.allocation_top_address(); 429 return v8_context()->heap_data_.new_space_.allocation_top_address();
277 } 430 }
278 static Address* NewSpaceAllocationLimitAddress() { 431 static Address* NewSpaceAllocationLimitAddress() {
279 return new_space_.allocation_limit_address(); 432 return v8_context()->heap_data_.new_space_.allocation_limit_address();
280 } 433 }
281 434
282 // Uncommit unused semi space. 435 // Uncommit unused semi space.
283 static bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); } 436 static bool UncommitFromSpace() {
437 return v8_context()->heap_data_.new_space_.UncommitFromSpace();
438 }
284 439
285 #ifdef ENABLE_HEAP_PROTECTION 440 #ifdef ENABLE_HEAP_PROTECTION
286 // Protect/unprotect the heap by marking all spaces read-only/writable. 441 // Protect/unprotect the heap by marking all spaces read-only/writable.
287 static void Protect(); 442 static void Protect();
288 static void Unprotect(); 443 static void Unprotect();
289 #endif 444 #endif
290 445
291 // Allocates and initializes a new JavaScript object based on a 446 // Allocates and initializes a new JavaScript object based on a
292 // constructor. 447 // constructor.
293 // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation 448 // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
(...skipping 342 matching lines...)
636 // Utility to invoke the scavenger. This is needed in test code to 791 // Utility to invoke the scavenger. This is needed in test code to
637 // ensure correct callback for weak global handles. 792 // ensure correct callback for weak global handles.
638 static void PerformScavenge(); 793 static void PerformScavenge();
639 794
640 #ifdef DEBUG 795 #ifdef DEBUG
641 // Utility used with flag gc-greedy. 796 // Utility used with flag gc-greedy.
642 static bool GarbageCollectionGreedyCheck(); 797 static bool GarbageCollectionGreedyCheck();
643 #endif 798 #endif
644 799
645 static void SetGlobalGCPrologueCallback(GCCallback callback) { 800 static void SetGlobalGCPrologueCallback(GCCallback callback) {
646 global_gc_prologue_callback_ = callback; 801 v8_context()->heap_data_.global_gc_prologue_callback_ = callback;
647 } 802 }
648 static void SetGlobalGCEpilogueCallback(GCCallback callback) { 803 static void SetGlobalGCEpilogueCallback(GCCallback callback) {
649 global_gc_epilogue_callback_ = callback; 804 v8_context()->heap_data_.global_gc_epilogue_callback_ = callback;
650 } 805 }
651 806
652 // Heap root getters. We have versions with and without type::cast() here. 807 // Heap root getters. We have versions with and without type::cast() here.
653 // You can't use type::cast during GC because the assert fails. 808 // You can't use type::cast during GC because the assert fails.
654 #define ROOT_ACCESSOR(type, name, camel_name) \ 809 #define ROOT_ACCESSOR(type, name, camel_name) \
655 static inline type* name() { \ 810 static inline type* name() { \
656 return type::cast(roots_[k##camel_name##RootIndex]); \ 811 return type::cast( \
812 v8_context()->heap_data_.roots_[k##camel_name##RootIndex]); \
657 } \ 813 } \
658 static inline type* raw_unchecked_##name() { \ 814 static inline type* raw_unchecked_##name() { \
659 return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \ 815 return reinterpret_cast<type*>( \
816 v8_context()->heap_data_.roots_[k##camel_name##RootIndex]); \
660 } 817 }
661 ROOT_LIST(ROOT_ACCESSOR) 818 ROOT_LIST(ROOT_ACCESSOR)
662 #undef ROOT_ACCESSOR 819 #undef ROOT_ACCESSOR
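
Expanded for a single root, the accessor pair reads roughly as follows (assuming the usual V(Oddball, undefined_value, UndefinedValue) list entry; illustrative only):

    // Rough expansion of ROOT_ACCESSOR: the checked getter goes through
    // type::cast() and its assert, the raw one does not, which is why only
    // the raw form is safe during GC.
    static inline Oddball* undefined_value() {
      return Oddball::cast(
          v8_context()->heap_data_.roots_[kUndefinedValueRootIndex]);
    }
    static inline Oddball* raw_unchecked_undefined_value() {
      return reinterpret_cast<Oddball*>(
          v8_context()->heap_data_.roots_[kUndefinedValueRootIndex]);
    }
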
663 820
664 // Utility type maps 821 // Utility type maps
665 #define STRUCT_MAP_ACCESSOR(NAME, Name, name) \ 822 #define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
666 static inline Map* name##_map() { \ 823 static inline Map* name##_map() { \
667 return Map::cast(roots_[k##Name##MapRootIndex]); \ 824 return Map::cast(v8_context()->heap_data_.roots_[k##Name##MapRootIndex]);\
668 } 825 }
669 STRUCT_LIST(STRUCT_MAP_ACCESSOR) 826 STRUCT_LIST(STRUCT_MAP_ACCESSOR)
670 #undef STRUCT_MAP_ACCESSOR 827 #undef STRUCT_MAP_ACCESSOR
671 828
672 #define SYMBOL_ACCESSOR(name, str) static inline String* name() { \ 829 #define SYMBOL_ACCESSOR(name, str) static inline String* name() { \
673 return String::cast(roots_[k##name##RootIndex]); \ 830 return String::cast(v8_context()->heap_data_.roots_[k##name##RootIndex]); \
674 } 831 }
675 SYMBOL_LIST(SYMBOL_ACCESSOR) 832 SYMBOL_LIST(SYMBOL_ACCESSOR)
676 #undef SYMBOL_ACCESSOR 833 #undef SYMBOL_ACCESSOR
677 834
678 // The hidden_symbol is special because it is the empty string, but does 835 // The hidden_symbol is special because it is the empty string, but does
679 // not match the empty string. 836 // not match the empty string.
680 static String* hidden_symbol() { return hidden_symbol_; } 837 static String* hidden_symbol() {
838 return v8_context()->heap_data_.hidden_symbol_;
839 }
681 840
682 // Iterates over all roots in the heap. 841 // Iterates over all roots in the heap.
683 static void IterateRoots(ObjectVisitor* v, VisitMode mode); 842 static void IterateRoots(ObjectVisitor* v, VisitMode mode);
684 // Iterates over all strong roots in the heap. 843 // Iterates over all strong roots in the heap.
685 static void IterateStrongRoots(ObjectVisitor* v, VisitMode mode); 844 static void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
686 845
687 // Iterates remembered set of an old space. 846 // Iterates remembered set of an old space.
688 static void IterateRSet(PagedSpace* space, ObjectSlotCallback callback); 847 static void IterateRSet(PagedSpace* space, ObjectSlotCallback callback);
689 848
690 // Iterates a range of remembered set addresses starting with rset_start 849 // Iterates a range of remembered set addresses starting with rset_start
(...skipping 19 matching lines...)
710 // Currently used by tests, serialization and heap verification only. 869 // Currently used by tests, serialization and heap verification only.
711 static bool InSpace(Address addr, AllocationSpace space); 870 static bool InSpace(Address addr, AllocationSpace space);
712 static bool InSpace(HeapObject* value, AllocationSpace space); 871 static bool InSpace(HeapObject* value, AllocationSpace space);
713 872
714 // Finds out which space an object should get promoted to based on its type. 873 // Finds out which space an object should get promoted to based on its type.
715 static inline OldSpace* TargetSpace(HeapObject* object); 874 static inline OldSpace* TargetSpace(HeapObject* object);
716 static inline AllocationSpace TargetSpaceId(InstanceType type); 875 static inline AllocationSpace TargetSpaceId(InstanceType type);
717 876
718 // Sets the stub_cache_ (only used when expanding the dictionary). 877 // Sets the stub_cache_ (only used when expanding the dictionary).
719 static void public_set_code_stubs(NumberDictionary* value) { 878 static void public_set_code_stubs(NumberDictionary* value) {
720 roots_[kCodeStubsRootIndex] = value; 879 v8_context()->heap_data_.roots_[kCodeStubsRootIndex] = value;
721 } 880 }
722 881
723 // Sets the non_monomorphic_cache_ (only used when expanding the dictionary). 882 // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
724 static void public_set_non_monomorphic_cache(NumberDictionary* value) { 883 static void public_set_non_monomorphic_cache(NumberDictionary* value) {
725 roots_[kNonMonomorphicCacheRootIndex] = value; 884 v8_context()->heap_data_.roots_[kNonMonomorphicCacheRootIndex] = value;
726 } 885 }
727 886
728 // Update the next script id. 887 // Update the next script id.
729 static inline void SetLastScriptId(Object* last_script_id); 888 static inline void SetLastScriptId(Object* last_script_id);
730 889
731 // Generated code can embed this address to get access to the roots. 890 // Generated code can embed this address to get access to the roots.
732 static Object** roots_address() { return roots_; } 891 static Object** roots_address() { return v8_context()->heap_data_.roots_; }
733 892
734 #ifdef DEBUG 893 #ifdef DEBUG
735 static void Print(); 894 static void Print();
736 static void PrintHandles(); 895 static void PrintHandles();
737 896
738 // Verify the heap is in its normal state before or after a GC. 897 // Verify the heap is in its normal state before or after a GC.
739 static void Verify(); 898 static void Verify();
740 899
741 // Report heap statistics. 900 // Report heap statistics.
742 static void ReportHeapStatistics(const char* title); 901 static void ReportHeapStatistics(const char* title);
(...skipping 17 matching lines...)
760 919
761 // Write barrier support for address[offset] = o. 920 // Write barrier support for address[offset] = o.
762 static inline void RecordWrite(Address address, int offset); 921 static inline void RecordWrite(Address address, int offset);
763 922
764 // Given an address occupied by a live code object, return that object. 923 // Given an address occupied by a live code object, return that object.
765 static Object* FindCodeObject(Address a); 924 static Object* FindCodeObject(Address a);
766 925
767 // Invoke Shrink on shrinkable spaces. 926 // Invoke Shrink on shrinkable spaces.
768 static void Shrink(); 927 static void Shrink();
769 928
770 enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT }; 929 static inline HeapState gc_state() {
771 static inline HeapState gc_state() { return gc_state_; } 930 return v8_context()->heap_data_.gc_state_;
931 }
772 932
773 #ifdef DEBUG 933 #ifdef DEBUG
774 static bool IsAllocationAllowed() { return allocation_allowed_; } 934 static bool IsAllocationAllowed() {
935 return v8_context()->heap_data_.allocation_allowed_;
936 }
775 static inline bool allow_allocation(bool enable); 937 static inline bool allow_allocation(bool enable);
776 938
777 static bool disallow_allocation_failure() { 939 static bool disallow_allocation_failure() {
778 return disallow_allocation_failure_; 940 return v8_context()->heap_data_.disallow_allocation_failure_;
779 } 941 }
780 942
781 static void TracePathToObject(); 943 static void TracePathToObject();
782 static void TracePathToGlobal(); 944 static void TracePathToGlobal();
783 #endif 945 #endif
784 946
785 // Callback function passed to Heap::Iterate etc. Copies an object if 947 // Callback function passed to Heap::Iterate etc. Copies an object if
786 // necessary, the object might be promoted to an old space. The caller must 948 // necessary, the object might be promoted to an old space. The caller must
787 // ensure the precondition that the object is (a) a heap object and (b) in 949 // ensure the precondition that the object is (a) a heap object and (b) in
788 // the heap's from space. 950 // the heap's from space.
(...skipping 31 matching lines...)
820 // Returns the adjusted value. 982 // Returns the adjusted value.
821 static inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes); 983 static inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
822 984
823 // Allocate uninitialized fixed array (pretenure == NON_TENURE). 985 // Allocate uninitialized fixed array (pretenure == NON_TENURE).
824 static Object* AllocateRawFixedArray(int length); 986 static Object* AllocateRawFixedArray(int length);
825 987
826 // True if we have reached the allocation limit in the old generation that 988 // True if we have reached the allocation limit in the old generation that
827 // should force the next GC (caused normally) to be a full one. 989 // should force the next GC (caused normally) to be a full one.
828 static bool OldGenerationPromotionLimitReached() { 990 static bool OldGenerationPromotionLimitReached() {
829 return (PromotedSpaceSize() + PromotedExternalMemorySize()) 991 return (PromotedSpaceSize() + PromotedExternalMemorySize())
830 > old_gen_promotion_limit_; 992 > v8_context()->heap_data_.old_gen_promotion_limit_;
831 } 993 }
832 994
833 // True if we have reached the allocation limit in the old generation that 995 // True if we have reached the allocation limit in the old generation that
834 // should artificially cause a GC right now. 996 // should artificially cause a GC right now.
835 static bool OldGenerationAllocationLimitReached() { 997 static bool OldGenerationAllocationLimitReached() {
836 return (PromotedSpaceSize() + PromotedExternalMemorySize()) 998 return (PromotedSpaceSize() + PromotedExternalMemorySize())
837 > old_gen_allocation_limit_; 999 > v8_context()->heap_data_.old_gen_allocation_limit_;
838 } 1000 }
839 1001
840 // Can be called when the embedding application is idle. 1002 // Can be called when the embedding application is idle.
841 static bool IdleNotification(); 1003 static bool IdleNotification();
842 1004
843 // Declare all the root indices.
844 enum RootListIndex {
845 #define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
846 STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
847 #undef ROOT_INDEX_DECLARATION
848
849 // Utility type maps
850 #define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
851 STRUCT_LIST(DECLARE_STRUCT_MAP)
852 #undef DECLARE_STRUCT_MAP
853
854 #define SYMBOL_INDEX_DECLARATION(name, str) k##name##RootIndex,
855 SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
855 #undef SYMBOL_INDEX_DECLARATION
857
858 kSymbolTableRootIndex,
859 kStrongRootListLength = kSymbolTableRootIndex,
860 kRootListLength
861 };
862 1005
863 static Object* NumberToString(Object* number); 1006 static Object* NumberToString(Object* number);
864 1007
865 static Map* MapForExternalArrayType(ExternalArrayType array_type); 1008 static Map* MapForExternalArrayType(ExternalArrayType array_type);
866 static RootListIndex RootIndexForExternalArrayType( 1009 static RootListIndex RootIndexForExternalArrayType(
867 ExternalArrayType array_type); 1010 ExternalArrayType array_type);
868 1011
869 static void RecordStats(HeapStats* stats); 1012 static void RecordStats(HeapStats* stats);
870 1013
871 private: 1014 private:
872 static int reserved_semispace_size_;
873 static int max_semispace_size_;
874 static int initial_semispace_size_;
875 static int max_old_generation_size_;
876 static size_t code_range_size_;
877
878 // For keeping track of how much data has survived
879 // scavenge since last new space expansion.
880 static int survived_since_last_expansion_;
881
882 static int always_allocate_scope_depth_;
883 static int linear_allocation_scope_depth_;
884 static bool context_disposed_pending_;
885 1015
886 // The number of MapSpace pages is limited by the way we pack 1016 // The number of MapSpace pages is limited by the way we pack
887 // Map pointers during GC. 1017 // Map pointers during GC.
888 static const int kMaxMapSpaceSize = 1018 static const int kMaxMapSpaceSize =
889 (1 << MapWord::kMapPageIndexBits) * Page::kPageSize; 1019 (1 << MapWord::kMapPageIndexBits) * Page::kPageSize;
890 1020
891 #if defined(V8_TARGET_ARCH_X64) 1021 #if defined(V8_TARGET_ARCH_X64)
892 static const int kMaxObjectSizeInNewSpace = 512*KB; 1022 static const int kMaxObjectSizeInNewSpace = 512*KB;
893 #else 1023 #else
894 static const int kMaxObjectSizeInNewSpace = 256*KB; 1024 static const int kMaxObjectSizeInNewSpace = 256*KB;
895 #endif 1025 #endif
896 1026
897 static NewSpace new_space_;
898 static OldSpace* old_pointer_space_;
899 static OldSpace* old_data_space_;
900 static OldSpace* code_space_;
901 static MapSpace* map_space_;
902 static CellSpace* cell_space_;
903 static LargeObjectSpace* lo_space_;
904 static HeapState gc_state_;
905
906 // Returns the size of objects residing in non-new spaces. 1027 // Returns the size of objects residing in non-new spaces.
907 static int PromotedSpaceSize(); 1028 static int PromotedSpaceSize();
908 1029
909 // Returns the amount of external memory registered since last global gc. 1030 // Returns the amount of external memory registered since last global gc.
910 static int PromotedExternalMemorySize(); 1031 static int PromotedExternalMemorySize();
911 1032
912 static int mc_count_; // how many mark-compact collections have happened
913 static int gc_count_; // how many GCs have happened
914
915 #define ROOT_ACCESSOR(type, name, camel_name) \ 1033 #define ROOT_ACCESSOR(type, name, camel_name) \
916 static inline void set_##name(type* value) { \ 1034 static inline void set_##name(type* value) { \
917 roots_[k##camel_name##RootIndex] = value; \ 1035 v8_context()->heap_data_.roots_[k##camel_name##RootIndex] = value; \
918 } 1036 }
919 ROOT_LIST(ROOT_ACCESSOR) 1037 ROOT_LIST(ROOT_ACCESSOR)
920 #undef ROOT_ACCESSOR 1038 #undef ROOT_ACCESSOR
921 1039
922 #ifdef DEBUG
923 static bool allocation_allowed_;
924
925 // If the --gc-interval flag is set to a positive value, this
926 // variable holds the number of allocations remaining until the
927 // next failure and garbage collection.
928 static int allocation_timeout_;
929
930 // Do we expect to be able to handle allocation failure at this
931 // time?
932 static bool disallow_allocation_failure_;
933 #endif // DEBUG
934
935 // Limit that triggers a global GC on the next (normally caused) GC. This
936 // is checked when we have already decided to do a GC to help determine
937 // which collector to invoke.
938 static int old_gen_promotion_limit_;
939
940 // Limit that triggers a global GC as soon as is reasonable. This is
941 // checked before expanding a paged space in the old generation and on
942 // every allocation in large object space.
943 static int old_gen_allocation_limit_;
944
945 // Limit on the amount of externally allocated memory allowed
946 // between global GCs. If reached, a global GC is forced.
947 static int external_allocation_limit_;
948
949 // The amount of external memory registered through the API kept alive
950 // by global handles.
951 static int amount_of_external_allocated_memory_;
952
953 // Caches the amount of external memory registered at the last global gc.
954 static int amount_of_external_allocated_memory_at_last_global_gc_;
955
956 // Indicates that an allocation has failed in the old generation since the
957 // last GC.
958 static int old_gen_exhausted_;
959
960 static Object* roots_[kRootListLength];
961
962 struct StringTypeTable { 1040 struct StringTypeTable {
963 InstanceType type; 1041 InstanceType type;
964 int size; 1042 int size;
965 RootListIndex index; 1043 RootListIndex index;
966 }; 1044 };
967 1045
968 struct ConstantSymbolTable { 1046 struct ConstantSymbolTable {
969 const char* contents; 1047 const char* contents;
970 RootListIndex index; 1048 RootListIndex index;
971 }; 1049 };
972 1050
973 struct StructTable { 1051 struct StructTable {
974 InstanceType type; 1052 InstanceType type;
975 int size; 1053 int size;
976 RootListIndex index; 1054 RootListIndex index;
977 }; 1055 };
978 1056
979 static const StringTypeTable string_type_table[]; 1057 static const StringTypeTable string_type_table[];
980 static const ConstantSymbolTable constant_symbol_table[]; 1058 static const ConstantSymbolTable constant_symbol_table[];
981 static const StructTable struct_table[]; 1059 static const StructTable struct_table[];
982 1060
983 // The special hidden symbol which is an empty string, but does not match
984 // any string when looked up in properties.
985 static String* hidden_symbol_;
986
987 // GC callback function, called before and after mark-compact GC.
988 // Allocations in the callback function are disallowed.
989 static GCCallback global_gc_prologue_callback_;
990 static GCCallback global_gc_epilogue_callback_;
991
992 // Checks whether a global GC is necessary 1061 // Checks whether a global GC is necessary
993 static GarbageCollector SelectGarbageCollector(AllocationSpace space); 1062 static GarbageCollector SelectGarbageCollector(AllocationSpace space);
994 1063
995 // Performs garbage collection 1064 // Performs garbage collection
996 static void PerformGarbageCollection(AllocationSpace space, 1065 static void PerformGarbageCollection(AllocationSpace space,
997 GarbageCollector collector, 1066 GarbageCollector collector,
998 GCTracer* tracer); 1067 GCTracer* tracer);
999 1068
1000 // Returns either a Smi or a Number object from 'value'. If 'new_object' 1069 // Returns either a Smi or a Number object from 'value'. If 'new_object'
1001 // is false, it may return a preallocated immutable object. 1070 // is false, it may return a preallocated immutable object.
(...skipping 126 matching lines...) Expand 10 before | Expand all | Expand 10 after
1128 }; 1197 };
1129 1198
1130 1199
1131 class AlwaysAllocateScope { 1200 class AlwaysAllocateScope {
1132 public: 1201 public:
1133 AlwaysAllocateScope() { 1202 AlwaysAllocateScope() {
1134 // We shouldn't hit any nested scopes, because that requires 1203 // We shouldn't hit any nested scopes, because that requires
1135 // non-handle code to call handle code. The code still works but 1204 // non-handle code to call handle code. The code still works but
1136 // performance will degrade, so we want to catch this situation 1205 // performance will degrade, so we want to catch this situation
1137 // in debug mode. 1206 // in debug mode.
1138 ASSERT(Heap::always_allocate_scope_depth_ == 0); 1207 ASSERT(v8_context()->heap_data_.always_allocate_scope_depth_ == 0);
1139 Heap::always_allocate_scope_depth_++; 1208 v8_context()->heap_data_.always_allocate_scope_depth_++;
1140 } 1209 }
1141 1210
1142 ~AlwaysAllocateScope() { 1211 ~AlwaysAllocateScope() {
1143 Heap::always_allocate_scope_depth_--; 1212 v8_context()->heap_data_.always_allocate_scope_depth_--;
1144 ASSERT(Heap::always_allocate_scope_depth_ == 0); 1213 ASSERT(v8_context()->heap_data_.always_allocate_scope_depth_ == 0);
1145 } 1214 }
1146 }; 1215 };
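
A usage sketch of the scope; the allocation call is only a stand-in for whatever work runs while the scope is active:

    {
      AlwaysAllocateScope always_allocate;          // depth goes 0 -> 1
      Object* result = Heap::AllocateRawFixedArray(16);
      // ... code that must not observe a retryable allocation failure ...
    }                                               // depth returns to 0
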
1147 1216
1148 1217
1149 class LinearAllocationScope { 1218 class LinearAllocationScope {
1150 public: 1219 public:
1151 LinearAllocationScope() { 1220 LinearAllocationScope() {
1152 Heap::linear_allocation_scope_depth_++; 1221 v8_context()->heap_data_.linear_allocation_scope_depth_++;
1153 } 1222 }
1154 1223
1155 ~LinearAllocationScope() { 1224 ~LinearAllocationScope() {
1156 Heap::linear_allocation_scope_depth_--; 1225 v8_context()->heap_data_.linear_allocation_scope_depth_--;
1157 ASSERT(Heap::linear_allocation_scope_depth_ >= 0); 1226 ASSERT(v8_context()->heap_data_.linear_allocation_scope_depth_ >= 0);
1158 } 1227 }
1159 }; 1228 };
1160 1229
1161 1230
1162 #ifdef DEBUG 1231 #ifdef DEBUG
1163 // Visitor class to verify interior pointers that do not have remembered set 1232 // Visitor class to verify interior pointers that do not have remembered set
1164 // bits. All heap object pointers have to point into the heap to a location 1233 // bits. All heap object pointers have to point into the heap to a location
1165 // that has a map pointer at its first word. Caveat: Heap::Contains is an 1234 // that has a map pointer at its first word. Caveat: Heap::Contains is an
1166 // approximation because it can return true for objects in a heap space but 1235 // approximation because it can return true for objects in a heap space but
1167 // above the allocation pointer. 1236 // above the allocation pointer.
(...skipping 105 matching lines...)
1273 1342
1274 // Perform all necessary shutdown (destruction) work. 1343 // Perform all necessary shutdown (destruction) work.
1275 void Shutdown(); 1344 void Shutdown();
1276 1345
1277 // Space iterator for iterating all the spaces. 1346 // Space iterator for iterating all the spaces.
1278 SpaceIterator* space_iterator_; 1347 SpaceIterator* space_iterator_;
1279 // Object iterator for the space currently being iterated. 1348 // Object iterator for the space currently being iterated.
1280 ObjectIterator* object_iterator_; 1349 ObjectIterator* object_iterator_;
1281 }; 1350 };
1282 1351
1352 class KeyedLookupCacheData {
1353 struct Key {
1354 Map* map;
1355 String* name;
1356 };
1357 static const int kLength = 64;
1358 Key keys_[kLength];
1359 int field_offsets_[kLength];
1360
1361 KeyedLookupCacheData() {
1362 for (int i = 0; i < kLength; ++i) {
1363 keys_[i].map = NULL;
1364 keys_[i].name = NULL;
1365 field_offsets_[i] = 0;
1366 }
1367 }
1368
1369 friend class KeyedLookupCache;
1370 friend class V8Context;
1371
1372 DISALLOW_COPY_AND_ASSIGN(KeyedLookupCacheData);
1373 };
1283 1374
1284 // Cache for mapping (map, property name) into field offset. 1375 // Cache for mapping (map, property name) into field offset.
1285 // Cleared at startup and prior to mark-sweep collection. 1376 // Cleared at startup and prior to mark-sweep collection.
1286 class KeyedLookupCache { 1377 class KeyedLookupCache {
1287 public: 1378 public:
1288 // Lookup field offset for (map, name). If absent, -1 is returned. 1379 // Lookup field offset for (map, name). If absent, -1 is returned.
1289 static int Lookup(Map* map, String* name); 1380 static int Lookup(Map* map, String* name);
1290 1381
1291 // Update an element in the cache. 1382 // Update an element in the cache.
1292 static void Update(Map* map, String* name, int field_offset); 1383 static void Update(Map* map, String* name, int field_offset);
1293 1384
1294 // Clear the cache. 1385 // Clear the cache.
1295 static void Clear(); 1386 static void Clear();
1296 private: 1387 private:
1297 static inline int Hash(Map* map, String* name); 1388 static inline int Hash(Map* map, String* name);
1389 };
1390
1391 class DescriptorLookupCacheData {
1298 static const int kLength = 64; 1392 static const int kLength = 64;
1299 struct Key { 1393 struct Key {
1300 Map* map; 1394 DescriptorArray* array;
1301 String* name; 1395 String* name;
1302 }; 1396 };
1303 static Key keys_[kLength]; 1397
1304 static int field_offsets_[kLength]; 1398 Key keys_[kLength];
1399 int results_[kLength];
1400
1401 DescriptorLookupCacheData() {
1402 for (int i = 0; i < kLength; ++i) {
1403 keys_[i].array = NULL;
1404 keys_[i].name = NULL;
1405 results_[i] = 0;
1406 }
1407 }
1408
1409 friend class DescriptorLookupCache;
1410 friend class V8Context;
1411 DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCacheData);
1305 }; 1412 };
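
The KeyedLookupCache above is used in the classic check-then-fill style; a sketch, where SlowLookupFieldOffset is a hypothetical stand-in for the uncached property lookup:

    int offset = KeyedLookupCache::Lookup(map, name);
    if (offset == -1) {                           // -1 signals a cache miss
      offset = SlowLookupFieldOffset(map, name);  // hypothetical slow path
      KeyedLookupCache::Update(map, name, offset);
    }
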
1306 1413
1307
1308
1309 // Cache for mapping (array, property name) into descriptor index. 1414 // Cache for mapping (array, property name) into descriptor index.
1310 // The cache contains both positive and negative results. 1415 // The cache contains both positive and negative results.
1311 // A descriptor index of kNotFound means the property is absent. 1416 // A descriptor index of kNotFound means the property is absent.
1312 // Cleared at startup and prior to any gc. 1417 // Cleared at startup and prior to any gc.
1313 class DescriptorLookupCache { 1418 class DescriptorLookupCache {
1314 public: 1419 public:
1315 // Lookup descriptor index for (array, name). 1420 // Lookup descriptor index for (array, name).
1316 // If absent, kAbsent is returned. 1421 // If absent, kAbsent is returned.
1317 static int Lookup(DescriptorArray* array, String* name) { 1422 static int Lookup(DescriptorArray* array, String* name) {
1318 if (!StringShape(name).IsSymbol()) return kAbsent; 1423 if (!StringShape(name).IsSymbol()) return kAbsent;
1319 int index = Hash(array, name); 1424 int index = Hash(array, name);
1320 Key& key = keys_[index]; 1425 DescriptorLookupCacheData& descriptor_lookup_cache_data =
1321 if ((key.array == array) && (key.name == name)) return results_[index]; 1426 v8_context()->descriptor_lookup_cache_data_;
1427 DescriptorLookupCacheData::Key& key =
1428 descriptor_lookup_cache_data.keys_[index];
1429
1430 if ((key.array == array) && (key.name == name)) {
1431 return descriptor_lookup_cache_data.results_[index];
1432 }
1322 return kAbsent; 1433 return kAbsent;
1323 } 1434 }
1324 1435
1325 // Update an element in the cache. 1436 // Update an element in the cache.
1326 static void Update(DescriptorArray* array, String* name, int result) { 1437 static void Update(DescriptorArray* array, String* name, int result) {
1327 ASSERT(result != kAbsent); 1438 ASSERT(result != kAbsent);
1328 if (StringShape(name).IsSymbol()) { 1439 if (StringShape(name).IsSymbol()) {
1329 int index = Hash(array, name); 1440 int index = Hash(array, name);
1330 Key& key = keys_[index]; 1441 DescriptorLookupCacheData& descriptor_lookup_cache_data =
1442 v8_context()->descriptor_lookup_cache_data_;
1443 DescriptorLookupCacheData::Key& key =
1444 descriptor_lookup_cache_data.keys_[index];
1445
1331 key.array = array; 1446 key.array = array;
1332 key.name = name; 1447 key.name = name;
1333 results_[index] = result; 1448 descriptor_lookup_cache_data.results_[index] = result;
1334 } 1449 }
1335 } 1450 }
1336 1451
1337 // Clear the cache. 1452 // Clear the cache.
1338 static void Clear(); 1453 static void Clear();
1339 1454
1340 static const int kAbsent = -2; 1455 static const int kAbsent = -2;
1341 private: 1456 private:
1342 static int Hash(DescriptorArray* array, String* name) { 1457 static int Hash(DescriptorArray* array, String* name) {
1343 // Uses only lower 32 bits if pointers are larger. 1458 // Uses only lower 32 bits if pointers are larger.
1344 uintptr_t array_hash = 1459 uintptr_t array_hash =
1345 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(array)) >> 2; 1460 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(array)) >> 2;
1346 uintptr_t name_hash = 1461 uintptr_t name_hash =
1347 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >> 2; 1462 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >> 2;
1348 return (array_hash ^ name_hash) % kLength; 1463 return (array_hash ^ name_hash) % DescriptorLookupCacheData::kLength;
1349 } 1464 }
1350
1351 static const int kLength = 64;
1352 struct Key {
1353 DescriptorArray* array;
1354 String* name;
1355 };
1356
1357 static Key keys_[kLength];
1358 static int results_[kLength];
1359 }; 1465 };
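
The descriptor cache follows the same check-then-fill pattern; FindDescriptor below is a hypothetical stand-in for searching the DescriptorArray itself:

    int index = DescriptorLookupCache::Lookup(descriptors, name);
    if (index == DescriptorLookupCache::kAbsent) {  // not cached yet
      index = FindDescriptor(descriptors, name);    // may yield kNotFound
      DescriptorLookupCache::Update(descriptors, name, index);
    }
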
1360 1466
1361 1467
1362 // ---------------------------------------------------------------------------- 1468 // ----------------------------------------------------------------------------
1363 // Marking stack for tracing live objects. 1469 // Marking stack for tracing live objects.
1364 1470
1365 class MarkingStack { 1471 class MarkingStack {
1366 public: 1472 public:
1367 void Initialize(Address low, Address high) { 1473 void Initialize(Address low, Address high) {
1368 top_ = low_ = reinterpret_cast<HeapObject**>(low); 1474 top_ = low_ = reinterpret_cast<HeapObject**>(low);
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after
1412 // /* Allocation not allowed: we cannot handle a GC in this scope. */ 1518 // /* Allocation not allowed: we cannot handle a GC in this scope. */
1413 // { AssertNoAllocation nogc; 1519 // { AssertNoAllocation nogc;
1414 // ... 1520 // ...
1415 // } 1521 // }
1416 1522
1417 #ifdef DEBUG 1523 #ifdef DEBUG
1418 1524
1419 class DisallowAllocationFailure { 1525 class DisallowAllocationFailure {
1420 public: 1526 public:
1421 DisallowAllocationFailure() { 1527 DisallowAllocationFailure() {
1422 old_state_ = Heap::disallow_allocation_failure_; 1528 old_state_ = v8_context()->heap_data_.disallow_allocation_failure_;
1423 Heap::disallow_allocation_failure_ = true; 1529 v8_context()->heap_data_.disallow_allocation_failure_ = true;
1424 } 1530 }
1425 ~DisallowAllocationFailure() { 1531 ~DisallowAllocationFailure() {
1426 Heap::disallow_allocation_failure_ = old_state_; 1532 v8_context()->heap_data_.disallow_allocation_failure_ = old_state_;
1427 } 1533 }
1428 private: 1534 private:
1429 bool old_state_; 1535 bool old_state_;
1430 }; 1536 };
1431 1537
1432 class AssertNoAllocation { 1538 class AssertNoAllocation {
1433 public: 1539 public:
1434 AssertNoAllocation() { 1540 AssertNoAllocation() {
1435 old_state_ = Heap::allow_allocation(false); 1541 old_state_ = Heap::allow_allocation(false);
1436 } 1542 }
(...skipping 94 matching lines...)
1531 // On a full GC, a count of the number of marked objects. Incremented 1637 // On a full GC, a count of the number of marked objects. Incremented
1532 // when an object is marked and decremented when an object's mark bit is 1638 // when an object is marked and decremented when an object's mark bit is
1533 // cleared. Will be zero on a scavenge collection. 1639 // cleared. Will be zero on a scavenge collection.
1534 int marked_count_; 1640 int marked_count_;
1535 1641
1536 // The count from the end of the previous full GC. Will be zero if there 1642 // The count from the end of the previous full GC. Will be zero if there
1537 // was no previous full GC. 1643 // was no previous full GC.
1538 int previous_marked_count_; 1644 int previous_marked_count_;
1539 }; 1645 };
1540 1646
1541 1647 class TranscendentalCacheTypes {
1542 class TranscendentalCache {
1543 public: 1648 public:
1544 enum Type {ACOS, ASIN, ATAN, COS, EXP, LOG, SIN, TAN, kNumberOfCaches}; 1649 enum Type {ACOS, ASIN, ATAN, COS, EXP, LOG, SIN, TAN, kNumberOfCaches};
1650 };
1545 1651
1652 class TranscendentalCache;
1653
1654 class TranscendentalCacheData: public TranscendentalCacheTypes {
1655 TranscendentalCache* caches_[kNumberOfCaches];
1656
1657 friend class TranscendentalCache;
1658 friend class V8Context;
1659
1660 TranscendentalCacheData() {
1661 for (int i = 0; i < kNumberOfCaches; ++i) caches_[i] = NULL;
1662 }
1663 DISALLOW_COPY_AND_ASSIGN(TranscendentalCacheData);
1664 };
1665
1666 class TranscendentalCache: public TranscendentalCacheTypes {
1667 public:
1546 explicit TranscendentalCache(Type t); 1668 explicit TranscendentalCache(Type t);
1547 1669
1548 // Returns a heap number with f(input), where f is a math function specified 1670 // Returns a heap number with f(input), where f is a math function specified
1549 // by the 'type' argument. 1671 // by the 'type' argument.
1550 static inline Object* Get(Type type, double input) { 1672 static inline Object* Get(Type type, double input) {
1551 TranscendentalCache* cache = caches_[type]; 1673 TranscendentalCacheData& transcendental_cache_data =
1674 v8_context()->transcendental_cache_data_;
1675 TranscendentalCache* cache = transcendental_cache_data.caches_[type];
1552 if (cache == NULL) { 1676 if (cache == NULL) {
1553 caches_[type] = cache = new TranscendentalCache(type); 1677 transcendental_cache_data.caches_[type] = cache =
1678 new TranscendentalCache(type);
1554 } 1679 }
1555 return cache->Get(input); 1680 return cache->Get(input);
1556 } 1681 }
1557 1682
1558 // The cache contains raw Object pointers. This method disposes of 1683 // The cache contains raw Object pointers. This method disposes of
1559 // them before a garbage collection. 1684 // them before a garbage collection.
1560 static void Clear(); 1685 static void Clear();
1561 1686
1562 private: 1687 private:
1563 inline Object* Get(double input) { 1688 inline Object* Get(double input) {
(...skipping 46 matching lines...)
1610 union Converter { 1735 union Converter {
1611 double dbl; 1736 double dbl;
1612 uint32_t integers[2]; 1737 uint32_t integers[2];
1613 }; 1738 };
1614 inline static int Hash(const Converter& c) { 1739 inline static int Hash(const Converter& c) {
1615 uint32_t hash = (c.integers[0] ^ c.integers[1]); 1740 uint32_t hash = (c.integers[0] ^ c.integers[1]);
1616 hash ^= hash >> 16; 1741 hash ^= hash >> 16;
1617 hash ^= hash >> 8; 1742 hash ^= hash >> 8;
1618 return (hash & (kCacheSize - 1)); 1743 return (hash & (kCacheSize - 1));
1619 } 1744 }
1620 static TranscendentalCache* caches_[kNumberOfCaches];
1621 Element elements_[kCacheSize]; 1745 Element elements_[kCacheSize];
1622 Type type_; 1746 Type type_;
1623 }; 1747 };
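
Usage is a single static call; the first request for a given Type lazily allocates that cache in the current context's TranscendentalCacheData (sketch):

    // Both calls route through the per-context data; the first allocates the
    // SIN cache, the second can be served from elements_ without recomputing.
    Object* first = TranscendentalCache::Get(TranscendentalCache::SIN, 0.5);
    Object* again = TranscendentalCache::Get(TranscendentalCache::SIN, 0.5);
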
1624 1748
1625 1749
1626 } } // namespace v8::internal 1750 } } // namespace v8::internal
1627 1751
1628 #endif // V8_HEAP_H_ 1752 #endif // V8_HEAP_H_