Chromium Code Reviews

Side by Side Diff: Source/platform/heap/Heap.h

Issue 1157933002: Oilpan: introduce eager finalization. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: round of improvements Created 5 years, 6 months ago
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 368 matching lines...)
379 BasePage* next() const { return m_next; } 379 BasePage* next() const { return m_next; }
380 380
381 // virtual methods are slow. So performance-sensitive methods 381 // virtual methods are slow. So performance-sensitive methods
382 // should be defined as non-virtual methods on NormalPage and LargeObjectPage. 382 // should be defined as non-virtual methods on NormalPage and LargeObjectPage.
383 // The following methods are not performance-sensitive. 383 // The following methods are not performance-sensitive.
384 virtual size_t objectPayloadSizeForTesting() = 0; 384 virtual size_t objectPayloadSizeForTesting() = 0;
385 virtual bool isEmpty() = 0; 385 virtual bool isEmpty() = 0;
386 virtual void removeFromHeap() = 0; 386 virtual void removeFromHeap() = 0;
387 virtual void sweep() = 0; 387 virtual void sweep() = 0;
388 virtual void makeConsistentForGC() = 0; 388 virtual void makeConsistentForGC() = 0;
389
389 #if defined(ADDRESS_SANITIZER) 390 #if defined(ADDRESS_SANITIZER)
390 virtual void poisonUnmarkedObjects() = 0; 391 virtual void poisonObjects(ObjectsToPoison, Poisoning) = 0;
391 #endif 392 #endif
392 // Check if the given address points to an object in this 393 // Check if the given address points to an object in this
393 // heap page. If so, find the start of that object and mark it 394 // heap page. If so, find the start of that object and mark it
394 // using the given Visitor. Otherwise do nothing. The pointer must 395 // using the given Visitor. Otherwise do nothing. The pointer must
395 // be within the same aligned blinkPageSize as the this-pointer. 396 // be within the same aligned blinkPageSize as the this-pointer.
396 // 397 //
397 // This is used during conservative stack scanning to 398 // This is used during conservative stack scanning to
398 // conservatively mark all objects that could be referenced from 399 // conservatively mark all objects that could be referenced from
399 // the stack. 400 // the stack.
400 virtual void checkAndMarkPointer(Visitor*, Address) = 0; 401 virtual void checkAndMarkPointer(Visitor*, Address) = 0;
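The comment above specifies the conservative-marking contract: given a possibly-interior pointer found on the stack, locate the enclosing object on this page and mark it. A conceptual sketch only, not the implementation in this patch; findHeaderFromAddress() and markHeader() are hypothetical names:

    // Conceptual sketch; helper names below are hypothetical.
    void SomePage::checkAndMarkPointer(Visitor* visitor, Address address)
    {
        // Ignore addresses outside this page's object payload.
        if (!containedInObjectPayload(address))
            return;
        // Find the start of the object that 'address' points into, then
        // mark it so the stack-held interior pointer keeps it alive.
        if (HeapObjectHeader* header = findHeaderFromAddress(address))
            visitor->markHeader(header);
    }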
(...skipping 66 matching lines...)
467 { 468 {
468 return payload() <= address && address < payloadEnd(); 469 return payload() <= address && address < payloadEnd();
469 } 470 }
470 471
471 virtual size_t objectPayloadSizeForTesting() override; 472 virtual size_t objectPayloadSizeForTesting() override;
472 virtual bool isEmpty() override; 473 virtual bool isEmpty() override;
473 virtual void removeFromHeap() override; 474 virtual void removeFromHeap() override;
474 virtual void sweep() override; 475 virtual void sweep() override;
475 virtual void makeConsistentForGC() override; 476 virtual void makeConsistentForGC() override;
476 #if defined(ADDRESS_SANITIZER) 477 #if defined(ADDRESS_SANITIZER)
477 virtual void poisonUnmarkedObjects() override; 478 virtual void poisonObjects(ObjectsToPoison, Poisoning) override;
478 #endif 479 #endif
479 virtual void checkAndMarkPointer(Visitor*, Address) override; 480 virtual void checkAndMarkPointer(Visitor*, Address) override;
480 virtual void markOrphaned() override; 481 virtual void markOrphaned() override;
481 #if ENABLE(GC_PROFILING) 482 #if ENABLE(GC_PROFILING)
482 const GCInfo* findGCInfo(Address) override; 483 const GCInfo* findGCInfo(Address) override;
483 void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override; 484 void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override;
484 void incrementMarkedObjectsAge() override; 485 void incrementMarkedObjectsAge() override;
485 void countMarkedObjects(ClassAgeCountsMap&) override; 486 void countMarkedObjects(ClassAgeCountsMap&) override;
486 void countObjectsToSweep(ClassAgeCountsMap&) override; 487 void countObjectsToSweep(ClassAgeCountsMap&) override;
487 #endif 488 #endif
(...skipping 41 matching lines...)
529 { 530 {
530 return payload() <= address && address < payloadEnd(); 531 return payload() <= address && address < payloadEnd();
531 } 532 }
532 533
533 virtual size_t objectPayloadSizeForTesting() override; 534 virtual size_t objectPayloadSizeForTesting() override;
534 virtual bool isEmpty() override; 535 virtual bool isEmpty() override;
535 virtual void removeFromHeap() override; 536 virtual void removeFromHeap() override;
536 virtual void sweep() override; 537 virtual void sweep() override;
537 virtual void makeConsistentForGC() override; 538 virtual void makeConsistentForGC() override;
538 #if defined(ADDRESS_SANITIZER) 539 #if defined(ADDRESS_SANITIZER)
539 virtual void poisonUnmarkedObjects() override; 540 virtual void poisonObjects(ObjectsToPoison, Poisoning) override;
540 #endif 541 #endif
541 virtual void checkAndMarkPointer(Visitor*, Address) override; 542 virtual void checkAndMarkPointer(Visitor*, Address) override;
542 virtual void markOrphaned() override; 543 virtual void markOrphaned() override;
543 544
544 #if ENABLE(GC_PROFILING) 545 #if ENABLE(GC_PROFILING)
545 const GCInfo* findGCInfo(Address) override; 546 const GCInfo* findGCInfo(Address) override;
546 void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override; 547 void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override;
547 void incrementMarkedObjectsAge() override; 548 void incrementMarkedObjectsAge() override;
548 void countMarkedObjects(ClassAgeCountsMap&) override; 549 void countMarkedObjects(ClassAgeCountsMap&) override;
549 void countObjectsToSweep(ClassAgeCountsMap&) override; 550 void countObjectsToSweep(ClassAgeCountsMap&) override;
(...skipping 144 matching lines...)
694 virtual void clearFreeLists() { } 695 virtual void clearFreeLists() { }
695 void makeConsistentForGC(); 696 void makeConsistentForGC();
696 #if ENABLE(ASSERT) 697 #if ENABLE(ASSERT)
697 virtual bool isConsistentForGC() = 0; 698 virtual bool isConsistentForGC() = 0;
698 #endif 699 #endif
699 size_t objectPayloadSizeForTesting(); 700 size_t objectPayloadSizeForTesting();
700 void prepareHeapForTermination(); 701 void prepareHeapForTermination();
701 void prepareForSweep(); 702 void prepareForSweep();
702 #if defined(ADDRESS_SANITIZER) 703 #if defined(ADDRESS_SANITIZER)
703 void poisonUnmarkedObjects(); 704 void poisonUnmarkedObjects();
705 void poisonHeap(Poisoning);
704 #endif 706 #endif
705 Address lazySweep(size_t, size_t gcInfoIndex); 707 Address lazySweep(size_t, size_t gcInfoIndex);
706 void sweepUnsweptPage(); 708 void sweepUnsweptPage();
707 // Returns true if we have swept all pages within the deadline. 709 // Returns true if we have swept all pages within the deadline.
708 // Returns false otherwise. 710 // Returns false otherwise.
709 bool lazySweepWithDeadline(double deadlineSeconds); 711 bool lazySweepWithDeadline(double deadlineSeconds);
710 void completeSweep(); 712 void completeSweep();
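For context, a sketch of how a caller might drive the deadline-based sweeper during an idle period; the caller and the rescheduling hook are hypothetical, not part of this patch:

    // Sweep until finished or until the idle deadline passes.
    if (!heap->lazySweepWithDeadline(deadlineSeconds))
        scheduleIdleSweep(); // hypothetical: try again in the next idle slot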
711 713
712 ThreadState* threadState() { return m_threadState; } 714 ThreadState* threadState() { return m_threadState; }
713 int heapIndex() const { return m_index; } 715 int heapIndex() const { return m_index; }
(...skipping 181 matching lines...)
895 size_t allocationSize = size + sizeof(HeapObjectHeader); 897 size_t allocationSize = size + sizeof(HeapObjectHeader);
896 // Align size with allocation granularity. 898 // Align size with allocation granularity.
897 allocationSize = (allocationSize + allocationMask) & ~allocationMask; 899 allocationSize = (allocationSize + allocationMask) & ~allocationMask;
898 return allocationSize; 900 return allocationSize;
899 } 901 }
900 static inline size_t roundedAllocationSize(size_t size) 902 static inline size_t roundedAllocationSize(size_t size)
901 { 903 {
902 return allocationSizeFromSize(size) - sizeof(HeapObjectHeader); 904 return allocationSizeFromSize(size) - sizeof(HeapObjectHeader);
903 } 905 }
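A worked example of the rounding above, assuming for illustration an 8-byte HeapObjectHeader and an 8-byte allocation granularity (so allocationMask == 7); the actual constants are defined elsewhere in this header:

    // Request 20 payload bytes:
    //   allocationSize = 20 + sizeof(HeapObjectHeader) = 28
    //   (28 + 7) & ~7 = 32          // rounded up to the granularity
    // roundedAllocationSize(20) then reports 32 - 8 = 24 usable bytes.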
904 static Address allocateOnHeapIndex(ThreadState*, size_t, int heapIndex, size_t gcInfoIndex); 906 static Address allocateOnHeapIndex(ThreadState*, size_t, int heapIndex, size_t gcInfoIndex);
905 template<typename T> static Address allocate(size_t); 907 template<typename T> static Address allocate(size_t, bool eagerlySweep = false);
906 template<typename T> static Address reallocate(void* previous, size_t); 908 template<typename T> static Address reallocate(void* previous, size_t);
907 909
908 enum GCReason { 910 enum GCReason {
909 IdleGC, 911 IdleGC,
910 PreciseGC, 912 PreciseGC,
911 ConservativeGC, 913 ConservativeGC,
912 ForcedGC, 914 ForcedGC,
913 NumberOfGCReason 915 NumberOfGCReason
914 }; 916 };
915 static const char* gcReasonString(GCReason); 917 static const char* gcReasonString(GCReason);
(...skipping 85 matching lines...)
1001 static void remove(PageMemoryRegion*, RegionTree**); 1003 static void remove(PageMemoryRegion*, RegionTree**);
1002 private: 1004 private:
1003 PageMemoryRegion* m_region; 1005 PageMemoryRegion* m_region;
1004 RegionTree* m_left; 1006 RegionTree* m_left;
1005 RegionTree* m_right; 1007 RegionTree* m_right;
1006 }; 1008 };
1007 1009
1008 // Reset counters that track live and allocated-since-last-GC sizes. 1010 // Reset counters that track live and allocated-since-last-GC sizes.
1009 static void resetHeapCounters(); 1011 static void resetHeapCounters();
1010 1012
1013 static int heapIndexForObjectSize(size_t);
1014 static bool isNormalHeapIndex(int);
1015
1011 static Visitor* s_markingVisitor; 1016 static Visitor* s_markingVisitor;
1012 static CallbackStack* s_markingStack; 1017 static CallbackStack* s_markingStack;
1013 static CallbackStack* s_postMarkingCallbackStack; 1018 static CallbackStack* s_postMarkingCallbackStack;
1014 static CallbackStack* s_globalWeakCallbackStack; 1019 static CallbackStack* s_globalWeakCallbackStack;
1015 static CallbackStack* s_ephemeronStack; 1020 static CallbackStack* s_ephemeronStack;
1016 static HeapDoesNotContainCache* s_heapDoesNotContainCache; 1021 static HeapDoesNotContainCache* s_heapDoesNotContainCache;
1017 static bool s_shutdownCalled; 1022 static bool s_shutdownCalled;
1018 static bool s_lastGCWasConservative; 1023 static bool s_lastGCWasConservative;
1019 static FreePagePool* s_freePagePool; 1024 static FreePagePool* s_freePagePool;
1020 static OrphanedPagePool* s_orphanedPagePool; 1025 static OrphanedPagePool* s_orphanedPagePool;
1021 static RegionTree* s_regionTree; 1026 static RegionTree* s_regionTree;
1022 static size_t s_allocatedSpace; 1027 static size_t s_allocatedSpace;
1023 static size_t s_allocatedObjectSize; 1028 static size_t s_allocatedObjectSize;
1024 static size_t s_markedObjectSize; 1029 static size_t s_markedObjectSize;
1025 static size_t s_estimatedLiveObjectSize; 1030 static size_t s_estimatedLiveObjectSize;
1026 static size_t s_externalObjectSizeAtLastGC; 1031 static size_t s_externalObjectSizeAtLastGC;
1027 static double s_estimatedMarkingTimePerByte; 1032 static double s_estimatedMarkingTimePerByte;
1028 1033
1029 friend class ThreadState; 1034 friend class ThreadState;
1030 }; 1035 };
1031 1036
1037 template<typename T>
1038 struct IsEagerlyFinalizedType {
1039 private:
1040 typedef char YesType;
1041 struct NoType {
1042 char padding[8];
1043 };
1044
1045 template <typename U> static YesType checkMarker(typename U::IsEagerlyFinalizedMarker*);
1046 template <typename U> static NoType checkMarker(...);
1047
1048 public:
1049 static const bool value = sizeof(checkMarker<T>(nullptr)) == sizeof(YesType);
1050 };
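The struct above is a standard SFINAE detector: overload resolution prefers checkMarker(typename U::IsEagerlyFinalizedMarker*) when the marker typedef exists, and the differing return-type sizes expose which overload was chosen. A minimal sketch with hypothetical classes:

    // Hypothetical classes, for illustration only.
    class PlainThing : public GarbageCollected<PlainThing> { };
    class PromptThing : public GarbageCollected<PromptThing> {
    public:
        typedef int IsEagerlyFinalizedMarker; // what EAGERLY_FINALIZE() expands to below
    };
    static_assert(!IsEagerlyFinalizedType<PlainThing>::value, "no marker");
    static_assert(IsEagerlyFinalizedType<PromptThing>::value, "marker detected");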
1051
1032 template<typename T> class GarbageCollected { 1052 template<typename T> class GarbageCollected {
1033 WTF_MAKE_NONCOPYABLE(GarbageCollected); 1053 WTF_MAKE_NONCOPYABLE(GarbageCollected);
1034 1054
1035 // For now direct allocation of arrays on the heap is not allowed. 1055 // For now direct allocation of arrays on the heap is not allowed.
1036 void* operator new[](size_t size); 1056 void* operator new[](size_t size);
1037 1057
1038 #if OS(WIN) && COMPILER(MSVC) 1058 #if OS(WIN) && COMPILER(MSVC)
1039 // Due to some quirkiness in the MSVC compiler we have to provide 1059 // Due to some quirkiness in the MSVC compiler we have to provide
1040 // the delete[] operator in the GarbageCollected subclasses as it 1060 // the delete[] operator in the GarbageCollected subclasses as it
1041 // is called when a class is exported in a DLL. 1061 // is called when a class is exported in a DLL.
1042 protected: 1062 protected:
1043 void operator delete[](void* p) 1063 void operator delete[](void* p)
1044 { 1064 {
1045 ASSERT_NOT_REACHED(); 1065 ASSERT_NOT_REACHED();
1046 } 1066 }
1047 #else 1067 #else
1048 void operator delete[](void* p); 1068 void operator delete[](void* p);
1049 #endif 1069 #endif
1050 1070
1051 public: 1071 public:
1052 using GarbageCollectedBase = T; 1072 using GarbageCollectedBase = T;
1053 1073
1054 void* operator new(size_t size) 1074 void* operator new(size_t size)
1055 { 1075 {
1056 return allocateObject(size); 1076 return allocateObject(size, IsEagerlyFinalizedType<T>::value);
1057 } 1077 }
1058 1078
1059 static void* allocateObject(size_t size) 1079 static void* allocateObject(size_t size, bool eagerlySweep)
1060 { 1080 {
1061 return Heap::allocate<T>(size); 1081 return Heap::allocate<T>(size, eagerlySweep);
1062 } 1082 }
1063 1083
1064 void operator delete(void* p) 1084 void operator delete(void* p)
1065 { 1085 {
1066 ASSERT_NOT_REACHED(); 1086 ASSERT_NOT_REACHED();
1067 } 1087 }
1068 1088
1069 protected: 1089 protected:
1070 GarbageCollected() 1090 GarbageCollected()
1071 { 1091 {
1072 } 1092 }
1073 }; 1093 };
1074 1094
1075 // Assigning class types to their heaps. 1095 // Assigning class types to their heaps.
1076 // 1096 //
1077 // We use sized heaps for most 'normal' objcts to improve memory locality. 1097 // We use sized heaps for most 'normal' objects to improve memory locality.
1078 // It seems that the same type of objects are likely to be accessed together, 1098 // It seems that the same type of objects are likely to be accessed together,
1079 // which means that we want to group objects by type. That's one reason 1099 // which means that we want to group objects by type. That's one reason
1080 // why we provide dedicated heaps for popular types (e.g., Node, CSSValue), 1100 // why we provide dedicated heaps for popular types (e.g., Node, CSSValue),
1081 // but it's not practical to prepare dedicated heaps for all types. 1101 // but it's not practical to prepare dedicated heaps for all types.
1082 // Thus we group objects by their sizes, hoping that this will approximately 1102 // Thus we group objects by their sizes, hoping that this will approximately
1083 // group objects by their types. 1103 // group objects by their types.
1084 // 1104 //
1085 // An exception to the use of sized heaps is made for class types that 1105 // An exception to the use of sized heaps is made for class types that
1086 // require prompt finalization after a garbage collection. That is, their 1106 // require prompt finalization after a garbage collection. That is, their
1087 // instances have to be finalized early and cannot be delayed until lazy 1107 // instances have to be finalized early and cannot be delayed until lazy
1088 // sweeping kicks in for their heap and page. The EAGERLY_FINALIZE() 1108 // sweeping kicks in for their heap and page. The EAGERLY_FINALIZE()
1089 // macro is used to declare a class (and its derived classes) as being 1109 // macro is used to declare a class (and its derived classes) as being
1090 // in need of eagerly finalized. Must be defined with 'public' visibility 1110 // in need of eager finalization. Must be defined with 'public' visibility
1091 // for a class. 1111 // for a class.
1092 // 1112 //
1093 template<typename T, typename Enabled = void> 1113
1094 class HeapIndexTrait { 1114 inline int Heap::heapIndexForObjectSize(size_t size)
1095 public: 1115 {
1096 static int heapIndexForObject(size_t size) 1116 if (size < 64) {
1097 { 1117 if (size < 32)
1098 if (size < 64) { 1118 return NormalPage1HeapIndex;
1099 if (size < 32) 1119 return NormalPage2HeapIndex;
1100 return NormalPage1HeapIndex;
1101 return NormalPage2HeapIndex;
1102 }
1103 if (size < 128)
1104 return NormalPage3HeapIndex;
1105 return NormalPage4HeapIndex;
1106 } 1120 }
1107 }; 1121 if (size < 128)
1122 return NormalPage3HeapIndex;
1123 return NormalPage4HeapIndex;
1124 }
1125
1126 inline bool Heap::isNormalHeapIndex(int index)
1127 {
1128 return index >= NormalPage1HeapIndex && index <= NormalPage4HeapIndex;
1129 }
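Worked examples of the size bucketing these two helpers implement:

    // heapIndexForObjectSize(24)  == NormalPage1HeapIndex  (size < 32)
    // heapIndexForObjectSize(48)  == NormalPage2HeapIndex  (32 <= size < 64)
    // heapIndexForObjectSize(96)  == NormalPage3HeapIndex  (64 <= size < 128)
    // heapIndexForObjectSize(256) == NormalPage4HeapIndex  (128 <= size)
    // isNormalHeapIndex() is true for exactly these four indices.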
1108 1130
1109 #if ENABLE_LAZY_SWEEPING 1131 #if ENABLE_LAZY_SWEEPING
1110 #define EAGERLY_FINALIZE() typedef int IsEagerlyFinalizedMarker 1132 #define EAGERLY_FINALIZE() typedef int IsEagerlyFinalizedMarker
1111 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() 1133 #define EAGERLY_FINALIZE_WILL_BE_REMOVED()
1112 #else 1134 #else
1113 #define EAGERLY_FINALIZE() 1135 #define EAGERLY_FINALIZE()
1114 // TODO(Oilpan): define in terms of Oilpan's EAGERLY_FINALIZE() once lazy 1136 // TODO(Oilpan): define in terms of Oilpan's EAGERLY_FINALIZE() once lazy
1115 // sweeping is enabled non-Oilpan. 1137 // sweeping is enabled non-Oilpan.
1116 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() 1138 #define EAGERLY_FINALIZE_WILL_BE_REMOVED()
1117 #endif 1139 #endif
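Tying the pieces together: a class opts in with EAGERLY_FINALIZE(), IsEagerlyFinalizedType detects the injected marker, and GarbageCollected::operator new (above) passes eagerlySweep = true so the allocation is routed to EagerSweepHeapIndex. A hypothetical example, assuming the usual GarbageCollectedFinalized/trace idioms from the rest of the Oilpan API:

    // Hypothetical class, not from this patch: its destructor must run
    // promptly after a GC instead of waiting for lazy sweeping.
    class PromptHandle final : public GarbageCollectedFinalized<PromptHandle> {
    public:
        EAGERLY_FINALIZE();
        ~PromptHandle() { /* release an external resource promptly */ }
        DEFINE_INLINE_TRACE() { }
    };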
1118 1140
1119 template<typename T>
1120 struct IsEagerlyFinalizedType {
1121 private:
1122 typedef char YesType;
1123 struct NoType {
1124 char padding[8];
1125 };
1126
1127 template <typename U> static YesType checkMarker(typename U::IsEagerlyFinalizedMarker*);
1128 template <typename U> static NoType checkMarker(...);
1129
1130 public:
1131 static const bool value = sizeof(checkMarker<T>(nullptr)) == sizeof(YesType);
1132 };
1133
1134 template<typename T>
1135 class HeapIndexTrait<T, typename WTF::EnableIf<IsEagerlyFinalizedType<T>::value>::Type> {
1136 public:
1137 static int heapIndexForObject(size_t)
1138 {
1139 return EagerSweepHeapIndex;
1140 }
1141 };
1142
1143 NO_SANITIZE_ADDRESS inline 1141 NO_SANITIZE_ADDRESS inline
1144 size_t HeapObjectHeader::size() const 1142 size_t HeapObjectHeader::size() const
1145 { 1143 {
1146 size_t result = m_encoded & headerSizeMask; 1144 size_t result = m_encoded & headerSizeMask;
1147 // Large objects should not refer to header->size(). 1145 // Large objects should not refer to header->size().
1148 // The actual size of a large object is stored in 1146 // The actual size of a large object is stored in
1149 // LargeObjectPage::m_payloadSize. 1147 // LargeObjectPage::m_payloadSize.
1150 ASSERT(result != largeObjectSizeInHeader); 1148 ASSERT(result != largeObjectSizeInHeader);
1151 ASSERT(!pageFromObject(this)->isLargeObjectPage()); 1149 ASSERT(!pageFromObject(this)->isLargeObjectPage());
1152 return result; 1150 return result;
(...skipping 104 matching lines...)
1257 1255
1258 inline Address Heap::allocateOnHeapIndex(ThreadState* state, size_t size, int heapIndex, size_t gcInfoIndex) 1256 inline Address Heap::allocateOnHeapIndex(ThreadState* state, size_t size, int heapIndex, size_t gcInfoIndex)
1259 { 1257 {
1260 ASSERT(state->isAllocationAllowed()); 1258 ASSERT(state->isAllocationAllowed());
1261 ASSERT(heapIndex != LargeObjectHeapIndex); 1259 ASSERT(heapIndex != LargeObjectHeapIndex);
1262 NormalPageHeap* heap = static_cast<NormalPageHeap*>(state->heap(heapIndex)); 1260 NormalPageHeap* heap = static_cast<NormalPageHeap*>(state->heap(heapIndex));
1263 return heap->allocateObject(allocationSizeFromSize(size), gcInfoIndex); 1261 return heap->allocateObject(allocationSizeFromSize(size), gcInfoIndex);
1264 } 1262 }
1265 1263
1266 template<typename T> 1264 template<typename T>
1267 Address Heap::allocate(size_t size) 1265 Address Heap::allocate(size_t size, bool eagerlySweep)
1268 { 1266 {
1269 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); 1267 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
1270 return Heap::allocateOnHeapIndex(state, size, HeapIndexTrait<T>::heapIndexForObject(size), GCInfoTrait<T>::index()); 1268 return Heap::allocateOnHeapIndex(state, size, eagerlySweep ? EagerSweepHeapIndex : Heap::heapIndexForObjectSize(size), GCInfoTrait<T>::index());
1271 } 1269 }
1272 1270
1273 template<typename T> 1271 template<typename T>
1274 Address Heap::reallocate(void* previous, size_t size) 1272 Address Heap::reallocate(void* previous, size_t size)
1275 { 1273 {
1274 // Not intended to be a full C realloc() substitute;
1275 // realloc(nullptr, size) is not a supported alias for malloc(size).
1276
1277 // TODO(sof): promptly free the previous object.
1276 if (!size) { 1278 if (!size) {
1277 // If the new size is 0 this is equivalent to either free(previous) or 1279 // If the new size is 0 this is considered equivalent to free(previous).
1278 // malloc(0). In both cases we do nothing and return nullptr.
1279 return nullptr; 1280 return nullptr;
1280 } 1281 }
1282
1281 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); 1283 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
1282 // TODO(haraken): reallocate() should use the heap that the original object
1283 // is using. This won't be a big deal since reallocate() is rarely used.
1284 Address address = Heap::allocateOnHeapIndex(state, size, HeapIndexTrait<T>::heapIndexForObject(size), GCInfoTrait<T>::index());
1285 if (!previous) {
1286 // This is equivalent to malloc(size).
1287 return address;
1288 }
1289 HeapObjectHeader* previousHeader = HeapObjectHeader::fromPayload(previous); 1284 HeapObjectHeader* previousHeader = HeapObjectHeader::fromPayload(previous);
1285 BasePage* page = pageFromObject(previousHeader);
1286 ASSERT(page);
1287 int heapIndex = page->heap()->heapIndex();
1288 // Recompute the effective heap index if previous allocation
1289 // was on the normal heaps or a large object.
1290 if (isNormalHeapIndex(heapIndex) || heapIndex == LargeObjectHeapIndex)
1291 heapIndex = heapIndexForObjectSize(size);
1292
1290 // TODO(haraken): We don't support reallocate() for finalizable objects. 1293 // TODO(haraken): We don't support reallocate() for finalizable objects.
1291 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer()); 1294 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer());
1292 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index()); 1295 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index());
1296 Address address = Heap::allocateOnHeapIndex(state, size, heapIndex, GCInfoTrait<T>::index());
1293 size_t copySize = previousHeader->payloadSize(); 1297 size_t copySize = previousHeader->payloadSize();
1294 if (copySize > size) 1298 if (copySize > size)
1295 copySize = size; 1299 copySize = size;
1296 memcpy(address, previous, copySize); 1300 memcpy(address, previous, copySize);
1297 return address; 1301 return address;
1298 } 1302 }
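A usage sketch of the reallocate() semantics after this change; T is a hypothetical trivially-destructible garbage-collected type (reallocate() still rejects finalizable objects, per the ASSERT above):

    // Grow: the payload is copied and the destination heap is recomputed
    // from the new size (200 >= 128, so NormalPage4HeapIndex), unless the
    // original object lived on a dedicated, non-normal heap.
    Address grown = Heap::reallocate<T>(oldPayload, 200);

    // Shrink to zero: returns nullptr; the old object is not promptly
    // freed (see the TODO above) and is reclaimed by a later sweep.
    Address gone = Heap::reallocate<T>(oldPayload2, 0);

    // Note: reallocate(nullptr, n) is no longer a supported malloc(n) alias.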
1299 1303
1300 } // namespace blink 1304 } // namespace blink
1301 1305
1302 #endif // Heap_h 1306 #endif // Heap_h