Chromium Code Reviews

Side by Side Diff: Source/platform/heap/Heap.h

Issue 1157933002: Oilpan: introduce eager finalization. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Parameterize Heap::poisonHeap() over ObjectsToPoison Created 5 years, 6 months ago
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 368 matching lines...)
379 BasePage* next() const { return m_next; } 379 BasePage* next() const { return m_next; }
380 380
381 // virtual methods are slow. So performance-sensitive methods 381 // virtual methods are slow. So performance-sensitive methods
382 // should be defined as non-virtual methods on NormalPage and LargeObjectPage. 382 // should be defined as non-virtual methods on NormalPage and LargeObjectPage.
383 // The following methods are not performance-sensitive. 383 // The following methods are not performance-sensitive.
384 virtual size_t objectPayloadSizeForTesting() = 0; 384 virtual size_t objectPayloadSizeForTesting() = 0;
385 virtual bool isEmpty() = 0; 385 virtual bool isEmpty() = 0;
386 virtual void removeFromHeap() = 0; 386 virtual void removeFromHeap() = 0;
387 virtual void sweep() = 0; 387 virtual void sweep() = 0;
388 virtual void makeConsistentForGC() = 0; 388 virtual void makeConsistentForGC() = 0;
389
389 #if defined(ADDRESS_SANITIZER) 390 #if defined(ADDRESS_SANITIZER)
390 virtual void poisonUnmarkedObjects() = 0; 391 virtual void poisonObjects(ObjectsToPoison, Poisoning) = 0;
391 #endif 392 #endif
392 // Check if the given address points to an object in this 393 // Check if the given address points to an object in this
393 // heap page. If so, find the start of that object and mark it 394 // heap page. If so, find the start of that object and mark it
394 // using the given Visitor. Otherwise do nothing. The pointer must 395 // using the given Visitor. Otherwise do nothing. The pointer must
395 // be within the same aligned blinkPageSize as the this-pointer. 396 // be within the same aligned blinkPageSize as the this-pointer.
396 // 397 //
397 // This is used during conservative stack scanning to 398 // This is used during conservative stack scanning to
398 // conservatively mark all objects that could be referenced from 399 // conservatively mark all objects that could be referenced from
399 // the stack. 400 // the stack.
400 virtual void checkAndMarkPointer(Visitor*, Address) = 0; 401 virtual void checkAndMarkPointer(Visitor*, Address) = 0;
(...skipping 66 matching lines...)
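
A note on the checkAndMarkPointer() contract above: conservative stack scanning treats every word on the thread's stack as a possible heap pointer. The following is a conceptual sketch only, not Blink's actual scanning loop; lookupPageContaining() is a hypothetical stand-in for the page lookup the real code performs through its page/region bookkeeping.

    // Conceptual sketch: probe each stack word against the heap page it
    // would belong to, letting the page mark the enclosing object.
    void scanStackConservatively(Visitor* visitor, Address* start, Address* end)
    {
        for (Address* slot = start; slot < end; ++slot) {
            Address candidate = *slot;
            if (BasePage* page = lookupPageContaining(candidate)) // hypothetical helper
                page->checkAndMarkPointer(visitor, candidate);
        }
    }
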
467 { 468 {
468 return payload() <= address && address < payloadEnd(); 469 return payload() <= address && address < payloadEnd();
469 } 470 }
470 471
471 virtual size_t objectPayloadSizeForTesting() override; 472 virtual size_t objectPayloadSizeForTesting() override;
472 virtual bool isEmpty() override; 473 virtual bool isEmpty() override;
473 virtual void removeFromHeap() override; 474 virtual void removeFromHeap() override;
474 virtual void sweep() override; 475 virtual void sweep() override;
475 virtual void makeConsistentForGC() override; 476 virtual void makeConsistentForGC() override;
476 #if defined(ADDRESS_SANITIZER) 477 #if defined(ADDRESS_SANITIZER)
477 virtual void poisonUnmarkedObjects() override; 478 virtual void poisonObjects(ObjectsToPoison, Poisoning) override;
478 #endif 479 #endif
479 virtual void checkAndMarkPointer(Visitor*, Address) override; 480 virtual void checkAndMarkPointer(Visitor*, Address) override;
480 virtual void markOrphaned() override; 481 virtual void markOrphaned() override;
481 #if ENABLE(GC_PROFILING) 482 #if ENABLE(GC_PROFILING)
482 const GCInfo* findGCInfo(Address) override; 483 const GCInfo* findGCInfo(Address) override;
483 void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override; 484 void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override;
484 void incrementMarkedObjectsAge() override; 485 void incrementMarkedObjectsAge() override;
485 void countMarkedObjects(ClassAgeCountsMap&) override; 486 void countMarkedObjects(ClassAgeCountsMap&) override;
486 void countObjectsToSweep(ClassAgeCountsMap&) override; 487 void countObjectsToSweep(ClassAgeCountsMap&) override;
487 #endif 488 #endif
(...skipping 41 matching lines...)
529 { 530 {
530 return payload() <= address && address < payloadEnd(); 531 return payload() <= address && address < payloadEnd();
531 } 532 }
532 533
533 virtual size_t objectPayloadSizeForTesting() override; 534 virtual size_t objectPayloadSizeForTesting() override;
534 virtual bool isEmpty() override; 535 virtual bool isEmpty() override;
535 virtual void removeFromHeap() override; 536 virtual void removeFromHeap() override;
536 virtual void sweep() override; 537 virtual void sweep() override;
537 virtual void makeConsistentForGC() override; 538 virtual void makeConsistentForGC() override;
538 #if defined(ADDRESS_SANITIZER) 539 #if defined(ADDRESS_SANITIZER)
539 virtual void poisonUnmarkedObjects() override; 540 virtual void poisonObjects(ObjectsToPoison, Poisoning) override;
540 #endif 541 #endif
541 virtual void checkAndMarkPointer(Visitor*, Address) override; 542 virtual void checkAndMarkPointer(Visitor*, Address) override;
542 virtual void markOrphaned() override; 543 virtual void markOrphaned() override;
543 544
544 #if ENABLE(GC_PROFILING) 545 #if ENABLE(GC_PROFILING)
545 const GCInfo* findGCInfo(Address) override; 546 const GCInfo* findGCInfo(Address) override;
546 void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override; 547 void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override;
547 void incrementMarkedObjectsAge() override; 548 void incrementMarkedObjectsAge() override;
548 void countMarkedObjects(ClassAgeCountsMap&) override; 549 void countMarkedObjects(ClassAgeCountsMap&) override;
549 void countObjectsToSweep(ClassAgeCountsMap&) override; 550 void countObjectsToSweep(ClassAgeCountsMap&) override;
(...skipping 143 matching lines...)
693 694
694 virtual void clearFreeLists() { } 695 virtual void clearFreeLists() { }
695 void makeConsistentForGC(); 696 void makeConsistentForGC();
696 #if ENABLE(ASSERT) 697 #if ENABLE(ASSERT)
697 virtual bool isConsistentForGC() = 0; 698 virtual bool isConsistentForGC() = 0;
698 #endif 699 #endif
699 size_t objectPayloadSizeForTesting(); 700 size_t objectPayloadSizeForTesting();
700 void prepareHeapForTermination(); 701 void prepareHeapForTermination();
701 void prepareForSweep(); 702 void prepareForSweep();
702 #if defined(ADDRESS_SANITIZER) 703 #if defined(ADDRESS_SANITIZER)
703 void poisonUnmarkedObjects(); 704 void poisonHeap(ObjectsToPoison, Poisoning);
704 #endif 705 #endif
705 Address lazySweep(size_t, size_t gcInfoIndex); 706 Address lazySweep(size_t, size_t gcInfoIndex);
706 void sweepUnsweptPage(); 707 void sweepUnsweptPage();
707 // Returns true if we have swept all pages within the deadline. 708 // Returns true if we have swept all pages within the deadline.
708 // Returns false otherwise. 709 // Returns false otherwise.
709 bool lazySweepWithDeadline(double deadlineSeconds); 710 bool lazySweepWithDeadline(double deadlineSeconds);
710 void completeSweep(); 711 void completeSweep();
711 712
712 ThreadState* threadState() { return m_threadState; } 713 ThreadState* threadState() { return m_threadState; }
713 int heapIndex() const { return m_index; } 714 int heapIndex() const { return m_index; }
(...skipping 181 matching lines...)
895 size_t allocationSize = size + sizeof(HeapObjectHeader); 896 size_t allocationSize = size + sizeof(HeapObjectHeader);
896 // Align size with allocation granularity. 897 // Align size with allocation granularity.
897 allocationSize = (allocationSize + allocationMask) & ~allocationMask; 898 allocationSize = (allocationSize + allocationMask) & ~allocationMask;
898 return allocationSize; 899 return allocationSize;
899 } 900 }
900 static inline size_t roundedAllocationSize(size_t size) 901 static inline size_t roundedAllocationSize(size_t size)
901 { 902 {
902 return allocationSizeFromSize(size) - sizeof(HeapObjectHeader); 903 return allocationSizeFromSize(size) - sizeof(HeapObjectHeader);
903 } 904 }
904 static Address allocateOnHeapIndex(ThreadState*, size_t, int heapIndex, size_t gcInfoIndex); 905 static Address allocateOnHeapIndex(ThreadState*, size_t, int heapIndex, size_t gcInfoIndex);
905 template<typename T> static Address allocate(size_t); 906 template<typename T> static Address allocate(size_t, bool eagerlySweep = false);
906 template<typename T> static Address reallocate(void* previous, size_t); 907 template<typename T> static Address reallocate(void* previous, size_t);
907 908
908 enum GCReason { 909 enum GCReason {
909 IdleGC, 910 IdleGC,
910 PreciseGC, 911 PreciseGC,
911 ConservativeGC, 912 ConservativeGC,
912 ForcedGC, 913 ForcedGC,
913 NumberOfGCReason 914 NumberOfGCReason
914 }; 915 };
915 static const char* gcReasonString(GCReason); 916 static const char* gcReasonString(GCReason);
(...skipping 85 matching lines...)
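
As a quick sanity check on the allocationSizeFromSize() arithmetic in the chunk above: the payload size is padded by the object header and rounded up to the allocation granularity. The constants below are stand-ins chosen for illustration (a 4-byte header and 8-byte granularity, so the mask is 7); the real values are defined elsewhere in the heap headers.

    #include <cstddef>
    #include <cstdio>

    static const size_t kHeaderSize = 4;     // assumed sizeof(HeapObjectHeader)
    static const size_t kAllocationMask = 7; // assumed 8-byte allocation granularity

    // Mirrors allocationSizeFromSize(): header plus payload, rounded up.
    size_t allocationSizeFromSize(size_t size)
    {
        size_t allocationSize = size + kHeaderSize;
        allocationSize = (allocationSize + kAllocationMask) & ~kAllocationMask;
        return allocationSize;
    }

    int main()
    {
        printf("%zu\n", allocationSizeFromSize(20)); // 20 + 4 = 24, already aligned
        printf("%zu\n", allocationSizeFromSize(21)); // 21 + 4 = 25, rounds up to 32
        return 0;
    }
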
1001 static void remove(PageMemoryRegion*, RegionTree**); 1002 static void remove(PageMemoryRegion*, RegionTree**);
1002 private: 1003 private:
1003 PageMemoryRegion* m_region; 1004 PageMemoryRegion* m_region;
1004 RegionTree* m_left; 1005 RegionTree* m_left;
1005 RegionTree* m_right; 1006 RegionTree* m_right;
1006 }; 1007 };
1007 1008
1008 // Reset counters that track live and allocated-since-last-GC sizes. 1009 // Reset counters that track live and allocated-since-last-GC sizes.
1009 static void resetHeapCounters(); 1010 static void resetHeapCounters();
1010 1011
1012 static int heapIndexForObjectSize(size_t);
1013 static bool isNormalHeapIndex(int);
1014
1011 static Visitor* s_markingVisitor; 1015 static Visitor* s_markingVisitor;
1012 static CallbackStack* s_markingStack; 1016 static CallbackStack* s_markingStack;
1013 static CallbackStack* s_postMarkingCallbackStack; 1017 static CallbackStack* s_postMarkingCallbackStack;
1014 static CallbackStack* s_globalWeakCallbackStack; 1018 static CallbackStack* s_globalWeakCallbackStack;
1015 static CallbackStack* s_ephemeronStack; 1019 static CallbackStack* s_ephemeronStack;
1016 static HeapDoesNotContainCache* s_heapDoesNotContainCache; 1020 static HeapDoesNotContainCache* s_heapDoesNotContainCache;
1017 static bool s_shutdownCalled; 1021 static bool s_shutdownCalled;
1018 static bool s_lastGCWasConservative; 1022 static bool s_lastGCWasConservative;
1019 static FreePagePool* s_freePagePool; 1023 static FreePagePool* s_freePagePool;
1020 static OrphanedPagePool* s_orphanedPagePool; 1024 static OrphanedPagePool* s_orphanedPagePool;
1021 static RegionTree* s_regionTree; 1025 static RegionTree* s_regionTree;
1022 static size_t s_allocatedSpace; 1026 static size_t s_allocatedSpace;
1023 static size_t s_allocatedObjectSize; 1027 static size_t s_allocatedObjectSize;
1024 static size_t s_markedObjectSize; 1028 static size_t s_markedObjectSize;
1025 static size_t s_estimatedLiveObjectSize; 1029 static size_t s_estimatedLiveObjectSize;
1026 static size_t s_externalObjectSizeAtLastGC; 1030 static size_t s_externalObjectSizeAtLastGC;
1027 static double s_estimatedMarkingTimePerByte; 1031 static double s_estimatedMarkingTimePerByte;
1028 1032
1029 friend class ThreadState; 1033 friend class ThreadState;
1030 }; 1034 };
1031 1035
1036 template<typename T>
1037 struct IsEagerlyFinalizedType {
1038 private:
1039 typedef char YesType;
1040 struct NoType {
1041 char padding[8];
1042 };
1043
1044 template <typename U> static YesType checkMarker(typename U::IsEagerlyFinalizedMarker*);
1045 template <typename U> static NoType checkMarker(...);
1046
1047 public:
1048 static const bool value = sizeof(checkMarker<T>(nullptr)) == sizeof(YesType);
1049 };
1050
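
The struct above is a classic SFINAE detector: the pointer overload of checkMarker() is only viable when U declares the IsEagerlyFinalizedMarker typedef, and the differing return-type sizes reveal which overload was chosen. A standalone copy with two assumed example types shows the effect:

    #include <cstdio>

    struct PlainType { };                                       // no marker
    struct EagerType { typedef int IsEagerlyFinalizedMarker; }; // what EAGERLY_FINALIZE() declares

    template<typename T>
    struct IsEagerlyFinalizedType {
    private:
        typedef char YesType;
        struct NoType { char padding[8]; };

        // Viable only if U::IsEagerlyFinalizedMarker exists; otherwise
        // substitution fails silently and the variadic overload is picked.
        template<typename U> static YesType checkMarker(typename U::IsEagerlyFinalizedMarker*);
        template<typename U> static NoType checkMarker(...);

    public:
        static const bool value = sizeof(checkMarker<T>(nullptr)) == sizeof(YesType);
    };

    int main()
    {
        printf("%d %d\n", IsEagerlyFinalizedType<PlainType>::value,
                          IsEagerlyFinalizedType<EagerType>::value); // prints: 0 1
        return 0;
    }
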
1032 template<typename T> class GarbageCollected { 1051 template<typename T> class GarbageCollected {
1033 WTF_MAKE_NONCOPYABLE(GarbageCollected); 1052 WTF_MAKE_NONCOPYABLE(GarbageCollected);
1034 1053
1035 // For now direct allocation of arrays on the heap is not allowed. 1054 // For now direct allocation of arrays on the heap is not allowed.
1036 void* operator new[](size_t size); 1055 void* operator new[](size_t size);
1037 1056
1038 #if OS(WIN) && COMPILER(MSVC) 1057 #if OS(WIN) && COMPILER(MSVC)
1039 // Due to some quirkiness in the MSVC compiler we have to provide 1058 // Due to some quirkiness in the MSVC compiler we have to provide
1040 // the delete[] operator in the GarbageCollected subclasses as it 1059 // the delete[] operator in the GarbageCollected subclasses as it
1041 // is called when a class is exported in a DLL. 1060 // is called when a class is exported in a DLL.
1042 protected: 1061 protected:
1043 void operator delete[](void* p) 1062 void operator delete[](void* p)
1044 { 1063 {
1045 ASSERT_NOT_REACHED(); 1064 ASSERT_NOT_REACHED();
1046 } 1065 }
1047 #else 1066 #else
1048 void operator delete[](void* p); 1067 void operator delete[](void* p);
1049 #endif 1068 #endif
1050 1069
1051 public: 1070 public:
1052 using GarbageCollectedBase = T; 1071 using GarbageCollectedBase = T;
1053 1072
1054 void* operator new(size_t size) 1073 void* operator new(size_t size)
1055 { 1074 {
1056 return allocateObject(size); 1075 return allocateObject(size, IsEagerlyFinalizedType<T>::value);
1057 } 1076 }
1058 1077
1059 static void* allocateObject(size_t size) 1078 static void* allocateObject(size_t size, bool eagerlySweep)
1060 { 1079 {
1061 return Heap::allocate<T>(size); 1080 return Heap::allocate<T>(size, eagerlySweep);
1062 } 1081 }
1063 1082
1064 void operator delete(void* p) 1083 void operator delete(void* p)
1065 { 1084 {
1066 ASSERT_NOT_REACHED(); 1085 ASSERT_NOT_REACHED();
1067 } 1086 }
1068 1087
1069 protected: 1088 protected:
1070 GarbageCollected() 1089 GarbageCollected()
1071 { 1090 {
1072 } 1091 }
1073 }; 1092 };
1074 1093
1075 // Assigning class types to their heaps. 1094 // Assigning class types to their heaps.
1076 // 1095 //
1077 // We use sized heaps for most 'normal' objcts to improve memory locality. 1096 // We use sized heaps for most 'normal' objects to improve memory locality.
1078 // It seems that the same type of objects are likely to be accessed together, 1097 // It seems that the same type of objects are likely to be accessed together,
1079 // which means that we want to group objects by type. That's one reason 1098 // which means that we want to group objects by type. That's one reason
1080 // why we provide dedicated heaps for popular types (e.g., Node, CSSValue), 1099 // why we provide dedicated heaps for popular types (e.g., Node, CSSValue),
1081 // but it's not practical to prepare dedicated heaps for all types. 1100 // but it's not practical to prepare dedicated heaps for all types.
1082 // Thus we group objects by their sizes, hoping that this will approximately 1101 // Thus we group objects by their sizes, hoping that this will approximately
1083 // group objects by their types. 1102 // group objects by their types.
1084 // 1103 //
1085 // An exception to the use of sized heaps is made for class types that 1104 // An exception to the use of sized heaps is made for class types that
1086 // require prompt finalization after a garbage collection. That is, their 1105 // require prompt finalization after a garbage collection. That is, their
1087 // instances have to be finalized early and cannot be delayed until lazy 1106 // instances have to be finalized early and cannot be delayed until lazy
1088 // sweeping kicks in for their heap and page. The EAGERLY_FINALIZE() 1107 // sweeping kicks in for their heap and page. The EAGERLY_FINALIZE()
1089 // macro is used to declare a class (and its derived classes) as being 1108 // macro is used to declare a class (and its derived classes) as being
1090 // in need of eagerly finalized. Must be defined with 'public' visibility 1109 // in need of eager finalization. Must be defined with 'public' visibility
1091 // for a class. 1110 // for a class.
1092 // 1111 //
1093 template<typename T, typename Enabled = void> 1112
1094 class HeapIndexTrait { 1113 inline int Heap::heapIndexForObjectSize(size_t size)
1095 public: 1114 {
1096 static int heapIndexForObject(size_t size) 1115 if (size < 64) {
1097 { 1116 if (size < 32)
1098 if (size < 64) { 1117 return NormalPage1HeapIndex;
1099 if (size < 32) 1118 return NormalPage2HeapIndex;
1100 return NormalPage1HeapIndex;
1101 return NormalPage2HeapIndex;
1102 }
1103 if (size < 128)
1104 return NormalPage3HeapIndex;
1105 return NormalPage4HeapIndex;
1106 } 1119 }
1107 }; 1120 if (size < 128)
1121 return NormalPage3HeapIndex;
1122 return NormalPage4HeapIndex;
1123 }
1124
1125 inline bool Heap::isNormalHeapIndex(int index)
1126 {
1127 return index >= NormalPage1HeapIndex && index <= NormalPage4HeapIndex;
1128 }
1108 1129
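
For reference, the size-class boundaries implemented by heapIndexForObjectSize() above, checked standalone. The enum is a stand-in for the real heap-index constants; consecutive ordering is assumed, which is also what isNormalHeapIndex() relies on.

    #include <cassert>
    #include <cstddef>

    // Stand-in for the real heap-index enum; values assumed consecutive.
    enum HeapIndex { NormalPage1HeapIndex, NormalPage2HeapIndex,
                     NormalPage3HeapIndex, NormalPage4HeapIndex, EagerSweepHeapIndex };

    int heapIndexForObjectSize(size_t size)
    {
        if (size < 64)
            return size < 32 ? NormalPage1HeapIndex : NormalPage2HeapIndex;
        return size < 128 ? NormalPage3HeapIndex : NormalPage4HeapIndex;
    }

    int main()
    {
        assert(heapIndexForObjectSize(24) == NormalPage1HeapIndex);   // size < 32
        assert(heapIndexForObjectSize(48) == NormalPage2HeapIndex);   // 32 <= size < 64
        assert(heapIndexForObjectSize(96) == NormalPage3HeapIndex);   // 64 <= size < 128
        assert(heapIndexForObjectSize(4096) == NormalPage4HeapIndex); // size >= 128
        return 0;
    }
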
1109 #if ENABLE_LAZY_SWEEPING 1130 #if ENABLE_LAZY_SWEEPING
1110 #define EAGERLY_FINALIZE() typedef int IsEagerlyFinalizedMarker 1131 #define EAGERLY_FINALIZE() typedef int IsEagerlyFinalizedMarker
1111 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() 1132 #define EAGERLY_FINALIZE_WILL_BE_REMOVED()
1112 #else 1133 #else
1113 #define EAGERLY_FINALIZE() 1134 #define EAGERLY_FINALIZE()
1114 // TODO(Oilpan): define in terms of Oilpan's EAGERLY_FINALIZE() once lazy 1135 // TODO(Oilpan): define in terms of Oilpan's EAGERLY_FINALIZE() once lazy
1115 // sweeping is enabled non-Oilpan. 1136 // sweeping is enabled non-Oilpan.
1116 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() 1137 #define EAGERLY_FINALIZE_WILL_BE_REMOVED()
1117 #endif 1138 #endif
1118 1139
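
For context, a hypothetical class opting into eager finalization with the macro above. GarbageCollectedFinalized and DEFINE_INLINE_TRACE are existing Blink heap APIs; the class itself is invented for illustration. Declaring EAGERLY_FINALIZE() plants the marker typedef that IsEagerlyFinalizedType<T> detects, routing allocations of the type to the eager-sweep heap.

    // Hypothetical example; requires Blink's heap headers to compile.
    class Connection final : public GarbageCollectedFinalized<Connection> {
    public:
        EAGERLY_FINALIZE();
        ~Connection() { close(); } // runs promptly after GC, before lazy sweeping
        DEFINE_INLINE_TRACE() { }

    private:
        void close();
    };
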
1119 template<typename T>
1120 struct IsEagerlyFinalizedType {
1121 private:
1122 typedef char YesType;
1123 struct NoType {
1124 char padding[8];
1125 };
1126
1127 template <typename U> static YesType checkMarker(typename U::IsEagerlyFinalizedMarker*);
1128 template <typename U> static NoType checkMarker(...);
1129
1130 public:
1131 static const bool value = sizeof(checkMarker<T>(nullptr)) == sizeof(YesType);
1132 };
1133
1134 template<typename T>
1135 class HeapIndexTrait<T, typename WTF::EnableIf<IsEagerlyFinalizedType<T>::value>::Type> {
1136 public:
1137 static int heapIndexForObject(size_t)
1138 {
1139 return EagerSweepHeapIndex;
1140 }
1141 };
1142
1143 NO_SANITIZE_ADDRESS inline 1140 NO_SANITIZE_ADDRESS inline
1144 size_t HeapObjectHeader::size() const 1141 size_t HeapObjectHeader::size() const
1145 { 1142 {
1146 size_t result = m_encoded & headerSizeMask; 1143 size_t result = m_encoded & headerSizeMask;
1147 // Large objects should not refer to header->size(). 1144 // Large objects should not refer to header->size().
1148 // The actual size of a large object is stored in 1145 // The actual size of a large object is stored in
1149 // LargeObjectPage::m_payloadSize. 1146 // LargeObjectPage::m_payloadSize.
1150 ASSERT(result != largeObjectSizeInHeader); 1147 ASSERT(result != largeObjectSizeInHeader);
1151 ASSERT(!pageFromObject(this)->isLargeObjectPage()); 1148 ASSERT(!pageFromObject(this)->isLargeObjectPage());
1152 return result; 1149 return result;
(...skipping 104 matching lines...)
1257 1254
1258 inline Address Heap::allocateOnHeapIndex(ThreadState* state, size_t size, int heapIndex, size_t gcInfoIndex) 1255 inline Address Heap::allocateOnHeapIndex(ThreadState* state, size_t size, int heapIndex, size_t gcInfoIndex)
1259 { 1256 {
1260 ASSERT(state->isAllocationAllowed()); 1257 ASSERT(state->isAllocationAllowed());
1261 ASSERT(heapIndex != LargeObjectHeapIndex); 1258 ASSERT(heapIndex != LargeObjectHeapIndex);
1262 NormalPageHeap* heap = static_cast<NormalPageHeap*>(state->heap(heapIndex)); 1259 NormalPageHeap* heap = static_cast<NormalPageHeap*>(state->heap(heapIndex));
1263 return heap->allocateObject(allocationSizeFromSize(size), gcInfoIndex); 1260 return heap->allocateObject(allocationSizeFromSize(size), gcInfoIndex);
1264 } 1261 }
1265 1262
1266 template<typename T> 1263 template<typename T>
1267 Address Heap::allocate(size_t size) 1264 Address Heap::allocate(size_t size, bool eagerlySweep)
1268 { 1265 {
1269 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); 1266 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
1270 return Heap::allocateOnHeapIndex(state, size, HeapIndexTrait<T>::heapIndexForObject(size), GCInfoTrait<T>::index()); 1267 return Heap::allocateOnHeapIndex(state, size, eagerlySweep ? EagerSweepHeapIndex : Heap::heapIndexForObjectSize(size), GCInfoTrait<T>::index());
1271 } 1268 }
1272 1269
1273 template<typename T> 1270 template<typename T>
1274 Address Heap::reallocate(void* previous, size_t size) 1271 Address Heap::reallocate(void* previous, size_t size)
1275 { 1272 {
1273 // Not intended to be a full C realloc() substitute;
1274 // realloc(nullptr, size) is not a supported alias for malloc(size).
1275
1276 // TODO(sof): promptly free the previous object.
1276 if (!size) { 1277 if (!size) {
1277 // If the new size is 0 this is equivalent to either free(previous) or 1278 // If the new size is 0 this is considered equivalent to free(previous).
1278 // malloc(0). In both cases we do nothing and return nullptr.
1279 return nullptr; 1279 return nullptr;
1280 } 1280 }
1281
1281 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); 1282 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
1282 // TODO(haraken): reallocate() should use the heap that the original object
1283 // is using. This won't be a big deal since reallocate() is rarely used.
1284 Address address = Heap::allocateOnHeapIndex(state, size, HeapIndexTrait<T>::heapIndexForObject(size), GCInfoTrait<T>::index());
1285 if (!previous) {
1286 // This is equivalent to malloc(size).
1287 return address;
1288 }
1289 HeapObjectHeader* previousHeader = HeapObjectHeader::fromPayload(previous); 1283 HeapObjectHeader* previousHeader = HeapObjectHeader::fromPayload(previous);
1284 BasePage* page = pageFromObject(previousHeader);
1285 ASSERT(page);
1286 int heapIndex = page->heap()->heapIndex();
1287 // Recompute the effective heap index if previous allocation
1288 // was on the normal heaps or a large object.
1289 if (isNormalHeapIndex(heapIndex) || heapIndex == LargeObjectHeapIndex)
1290 heapIndex = heapIndexForObjectSize(size);
1291
1290 // TODO(haraken): We don't support reallocate() for finalizable objects. 1292 // TODO(haraken): We don't support reallocate() for finalizable objects.
1291 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer()); 1293 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer());
1292 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index()); 1294 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index());
1295 Address address = Heap::allocateOnHeapIndex(state, size, heapIndex, GCInfoTrait<T>::index());
1293 size_t copySize = previousHeader->payloadSize(); 1296 size_t copySize = previousHeader->payloadSize();
1294 if (copySize > size) 1297 if (copySize > size)
1295 copySize = size; 1298 copySize = size;
1296 memcpy(address, previous, copySize); 1299 memcpy(address, previous, copySize);
1297 return address; 1300 return address;
1298 } 1301 }
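
The copy at the end of reallocate() clamps to whichever is smaller, the old payload size or the requested size, so shrinking never reads past the new allocation. A minimal standalone illustration of that clamping, with plain buffers standing in for heap payloads:

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    int main()
    {
        char previous[8] = "payload";       // old payload, 8 bytes including '\0'
        char address[4];                    // new, smaller allocation
        size_t size = sizeof(address);
        size_t copySize = sizeof(previous); // previousHeader->payloadSize()
        if (copySize > size)
            copySize = size;                // shrink: copy only what fits
        memcpy(address, previous, copySize);
        printf("%.*s\n", (int)copySize, address); // prints: payl
        return 0;
    }
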
1299 1302
1300 } // namespace blink 1303 } // namespace blink
1301 1304
1302 #endif // Heap_h 1305 #endif // Heap_h