Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 368 matching lines...) | |
| 379 BasePage* next() const { return m_next; } | 379 BasePage* next() const { return m_next; } |
| 380 | 380 |
| 381 // virtual methods are slow. So performance-sensitive methods | 381 // virtual methods are slow. So performance-sensitive methods |
| 382 // should be defined as non-virtual methods on NormalPage and LargeObjectPage. | 382 // should be defined as non-virtual methods on NormalPage and LargeObjectPage. |
| 383 // The following methods are not performance-sensitive. | 383 // The following methods are not performance-sensitive. |
| 384 virtual size_t objectPayloadSizeForTesting() = 0; | 384 virtual size_t objectPayloadSizeForTesting() = 0; |
| 385 virtual bool isEmpty() = 0; | 385 virtual bool isEmpty() = 0; |
| 386 virtual void removeFromHeap() = 0; | 386 virtual void removeFromHeap() = 0; |
| 387 virtual void sweep() = 0; | 387 virtual void sweep() = 0; |
| 388 virtual void markUnmarkedObjectsDead() = 0; | 388 virtual void markUnmarkedObjectsDead() = 0; |
| 389 | |
| 389 #if defined(ADDRESS_SANITIZER) | 390 #if defined(ADDRESS_SANITIZER) |
| 390 virtual void poisonUnmarkedObjects() = 0; | 391 enum ObjectsToPoison { |
| 392 UnmarkedOnly, | |
| 393 UnmarkedOrMarked, | |
| 394 }; | |
| 395 enum Poisoning { | |
| 396 SetPoison, | |
| 397 ClearPoison, | |
| 398 }; | |
| 399 virtual void poisonObjects(ObjectsToPoison, Poisoning) = 0; | |
| 391 #endif | 400 #endif |
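The patch widens the single-purpose `poisonUnmarkedObjects()` hook into `poisonObjects(ObjectsToPoison, Poisoning)`, letting callers pick both the object set and the direction. A minimal standalone sketch of the semantics the two parameters suggest: only the enum names come from the patch; `Object`, its `marked` flag, and the loop are illustrative stand-ins, and the real ASan calls (`ASAN_POISON_MEMORY_REGION` / `ASAN_UNPOISON_MEMORY_REGION` from `<sanitizer/asan_interface.h>`) are left in comments so the sketch compiles anywhere.

```cpp
#include <cstddef>

enum ObjectsToPoison { UnmarkedOnly, UnmarkedOrMarked };
enum Poisoning { SetPoison, ClearPoison };

// Stand-in for a heap object's header state; not a Blink type.
struct Object {
    bool marked;
    void* payload;
    size_t payloadSize;
};

void poisonObjects(Object* objects, size_t count, ObjectsToPoison which, Poisoning mode)
{
    for (size_t i = 0; i < count; ++i) {
        // UnmarkedOnly skips live (marked) objects; UnmarkedOrMarked
        // applies the operation to every object on the page.
        if (which == UnmarkedOnly && objects[i].marked)
            continue;
        const bool poison = (mode == SetPoison);
        // With ASan enabled one would call, per object:
        //   poison ? ASAN_POISON_MEMORY_REGION(objects[i].payload, objects[i].payloadSize)
        //          : ASAN_UNPOISON_MEMORY_REGION(objects[i].payload, objects[i].payloadSize);
        (void)poison;
    }
}
```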
| 392 // Check if the given address points to an object in this | 401 // Check if the given address points to an object in this |
| 393 // heap page. If so, find the start of that object and mark it | 402 // heap page. If so, find the start of that object and mark it |
| 394 // using the given Visitor. Otherwise do nothing. The pointer must | 403 // using the given Visitor. Otherwise do nothing. The pointer must |
| 395 // be within the same aligned blinkPageSize as the this-pointer. | 404 // be within the same aligned blinkPageSize as the this-pointer. |
| 396 // | 405 // |
| 397 // This is used during conservative stack scanning to | 406 // This is used during conservative stack scanning to |
| 398 // conservatively mark all objects that could be referenced from | 407 // conservatively mark all objects that could be referenced from |
| 399 // the stack. | 408 // the stack. |
| 400 virtual void checkAndMarkPointer(Visitor*, Address) = 0; | 409 virtual void checkAndMarkPointer(Visitor*, Address) = 0; |
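For context on `checkAndMarkPointer`'s role, here is a compilable sketch of conservative stack scanning under stated assumptions: `Visitor`, `BasePage`, and `pageFromAddress` below are simplified stand-ins, not Blink's real types; the point is only the "treat every stack word as a possible pointer" loop the comment above describes.

```cpp
#include <cstdint>

struct Visitor { };

struct BasePage {
    virtual void checkAndMarkPointer(Visitor*, uintptr_t address) = 0;
    virtual ~BasePage() { }
};

// Hypothetical address-to-page lookup; returns nullptr when the address
// is not inside any heap page (always, in this stub).
BasePage* pageFromAddress(uintptr_t) { return nullptr; }

// Treat every word on the stack as a potential pointer. If it lands in a
// heap page, let the page decide whether it points at a real object and,
// if so, mark that object so it survives the collection.
void scanStackConservatively(Visitor* visitor, uintptr_t* top, uintptr_t* bottom)
{
    for (uintptr_t* slot = top; slot < bottom; ++slot) {
        if (BasePage* page = pageFromAddress(*slot))
            page->checkAndMarkPointer(visitor, *slot);
    }
}

int main()
{
    Visitor visitor;
    uintptr_t fakeStack[4] = { 0x10, 0x20, 0x30, 0x40 };
    scanStackConservatively(&visitor, fakeStack, fakeStack + 4);
}
```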
| (...skipping 66 matching lines...) | |
| 467 { | 476 { |
| 468 return payload() <= address && address < payloadEnd(); | 477 return payload() <= address && address < payloadEnd(); |
| 469 } | 478 } |
| 470 | 479 |
| 471 virtual size_t objectPayloadSizeForTesting() override; | 480 virtual size_t objectPayloadSizeForTesting() override; |
| 472 virtual bool isEmpty() override; | 481 virtual bool isEmpty() override; |
| 473 virtual void removeFromHeap() override; | 482 virtual void removeFromHeap() override; |
| 474 virtual void sweep() override; | 483 virtual void sweep() override; |
| 475 virtual void markUnmarkedObjectsDead() override; | 484 virtual void markUnmarkedObjectsDead() override; |
| 476 #if defined(ADDRESS_SANITIZER) | 485 #if defined(ADDRESS_SANITIZER) |
| 477 virtual void poisonUnmarkedObjects() override; | 486 virtual void poisonObjects(ObjectsToPoison, Poisoning) override; |
| 478 #endif | 487 #endif |
| 479 virtual void checkAndMarkPointer(Visitor*, Address) override; | 488 virtual void checkAndMarkPointer(Visitor*, Address) override; |
| 480 virtual void markOrphaned() override; | 489 virtual void markOrphaned() override; |
| 481 #if ENABLE(GC_PROFILING) | 490 #if ENABLE(GC_PROFILING) |
| 482 const GCInfo* findGCInfo(Address) override; | 491 const GCInfo* findGCInfo(Address) override; |
| 483 void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override; | 492 void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override; |
| 484 void incrementMarkedObjectsAge() override; | 493 void incrementMarkedObjectsAge() override; |
| 485 void countMarkedObjects(ClassAgeCountsMap&) override; | 494 void countMarkedObjects(ClassAgeCountsMap&) override; |
| 486 void countObjectsToSweep(ClassAgeCountsMap&) override; | 495 void countObjectsToSweep(ClassAgeCountsMap&) override; |
| 487 #endif | 496 #endif |
| (...skipping 41 matching lines...) | |
| 529 { | 538 { |
| 530 return payload() <= address && address < payloadEnd(); | 539 return payload() <= address && address < payloadEnd(); |
| 531 } | 540 } |
| 532 | 541 |
| 533 virtual size_t objectPayloadSizeForTesting() override; | 542 virtual size_t objectPayloadSizeForTesting() override; |
| 534 virtual bool isEmpty() override; | 543 virtual bool isEmpty() override; |
| 535 virtual void removeFromHeap() override; | 544 virtual void removeFromHeap() override; |
| 536 virtual void sweep() override; | 545 virtual void sweep() override; |
| 537 virtual void markUnmarkedObjectsDead() override; | 546 virtual void markUnmarkedObjectsDead() override; |
| 538 #if defined(ADDRESS_SANITIZER) | 547 #if defined(ADDRESS_SANITIZER) |
| 539 virtual void poisonUnmarkedObjects() override; | 548 virtual void poisonObjects(ObjectsToPoison, Poisoning) override; |
| 540 #endif | 549 #endif |
| 541 virtual void checkAndMarkPointer(Visitor*, Address) override; | 550 virtual void checkAndMarkPointer(Visitor*, Address) override; |
| 542 virtual void markOrphaned() override; | 551 virtual void markOrphaned() override; |
| 543 | 552 |
| 544 #if ENABLE(GC_PROFILING) | 553 #if ENABLE(GC_PROFILING) |
| 545 const GCInfo* findGCInfo(Address) override; | 554 const GCInfo* findGCInfo(Address) override; |
| 546 void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override; | 555 void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override; |
| 547 void incrementMarkedObjectsAge() override; | 556 void incrementMarkedObjectsAge() override; |
| 548 void countMarkedObjects(ClassAgeCountsMap&) override; | 557 void countMarkedObjects(ClassAgeCountsMap&) override; |
| 549 void countObjectsToSweep(ClassAgeCountsMap&) override; | 558 void countObjectsToSweep(ClassAgeCountsMap&) override; |
| (...skipping 144 matching lines...) | |
| 694 virtual void clearFreeLists() { } | 703 virtual void clearFreeLists() { } |
| 695 void makeConsistentForSweeping(); | 704 void makeConsistentForSweeping(); |
| 696 #if ENABLE(ASSERT) | 705 #if ENABLE(ASSERT) |
| 697 virtual bool isConsistentForSweeping() = 0; | 706 virtual bool isConsistentForSweeping() = 0; |
| 698 #endif | 707 #endif |
| 699 size_t objectPayloadSizeForTesting(); | 708 size_t objectPayloadSizeForTesting(); |
| 700 void prepareHeapForTermination(); | 709 void prepareHeapForTermination(); |
| 701 void prepareForSweep(); | 710 void prepareForSweep(); |
| 702 #if defined(ADDRESS_SANITIZER) | 711 #if defined(ADDRESS_SANITIZER) |
| 703 void poisonUnmarkedObjects(); | 712 void poisonUnmarkedObjects(); |
| 713 void poisonHeap(bool poisonOrNot); | |
| 704 #endif | 714 #endif |
| 705 Address lazySweep(size_t, size_t gcInfoIndex); | 715 Address lazySweep(size_t, size_t gcInfoIndex); |
| 706 void sweepUnsweptPage(); | 716 void sweepUnsweptPage(); |
| 707 // Returns true if we have swept all pages within the deadline. | 717 // Returns true if we have swept all pages within the deadline. |
| 708 // Returns false otherwise. | 718 // Returns false otherwise. |
| 709 bool lazySweepWithDeadline(double deadlineSeconds); | 719 bool lazySweepWithDeadline(double deadlineSeconds); |
| 710 void completeSweep(); | 720 void completeSweep(); |
| 711 | 721 |
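The `lazySweepWithDeadline` contract declared above (sweep until done or out of time) implies a loop of roughly the following shape. This is an assumption drawn from the declaration and its comment, not the actual body; `s_unsweptPages` and the stand-in `sweepUnsweptPage` are illustrative.

```cpp
#include <chrono>

static int s_unsweptPages = 100; // stand-in for the heap's unswept page list

static double currentTimeSeconds()
{
    using namespace std::chrono;
    return duration<double>(steady_clock::now().time_since_epoch()).count();
}

static void sweepUnsweptPage() { --s_unsweptPages; } // stand-in for real page sweeping

bool lazySweepWithDeadline(double deadlineSeconds)
{
    while (s_unsweptPages > 0) {
        // Check the budget before each page, so one slow page can at most
        // overshoot the deadline by a single sweep step.
        if (currentTimeSeconds() >= deadlineSeconds)
            return false; // deadline hit, pages remain; caller resumes later
        sweepUnsweptPage();
    }
    return true; // everything swept within the deadline
}

int main()
{
    lazySweepWithDeadline(currentTimeSeconds() + 0.001); // 1 ms budget
}
```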
| 712 ThreadState* threadState() { return m_threadState; } | 722 ThreadState* threadState() { return m_threadState; } |
| 713 int heapIndex() const { return m_index; } | 723 int heapIndex() const { return m_index; } |
| (...skipping 183 matching lines...) | |
| 897 size_t allocationSize = size + sizeof(HeapObjectHeader); | 907 size_t allocationSize = size + sizeof(HeapObjectHeader); |
| 898 // Align size with allocation granularity. | 908 // Align size with allocation granularity. |
| 899 allocationSize = (allocationSize + allocationMask) & ~allocationMask; | 909 allocationSize = (allocationSize + allocationMask) & ~allocationMask; |
| 900 return allocationSize; | 910 return allocationSize; |
| 901 } | 911 } |
| 902 static inline size_t roundedAllocationSize(size_t size) | 912 static inline size_t roundedAllocationSize(size_t size) |
| 903 { | 913 { |
| 904 return allocationSizeFromSize(size) - sizeof(HeapObjectHeader); | 914 return allocationSizeFromSize(size) - sizeof(HeapObjectHeader); |
| 905 } | 915 } |
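The two size helpers above are pure bit arithmetic. A self-contained check of the rounding, with illustrative constants (Blink's `allocationGranularity`, `allocationMask`, and `sizeof(HeapObjectHeader)` are defined elsewhere in the tree; 8 bytes each is an assumption for this demo):

```cpp
#include <cassert>
#include <cstddef>

const size_t allocationGranularity = 8; // assumed for the demo
const size_t allocationMask = allocationGranularity - 1;
const size_t headerSize = 8; // stand-in for sizeof(HeapObjectHeader)

size_t allocationSizeFromSize(size_t size)
{
    size_t allocationSize = size + headerSize;
    // Round up to the next multiple of the granularity:
    // add the mask, then clear the low bits.
    return (allocationSize + allocationMask) & ~allocationMask;
}

int main()
{
    // 13-byte payload + 8-byte header = 21, rounded up to 24.
    assert(allocationSizeFromSize(13) == 24);
    // An already-aligned total (24 + 8 = 32) is unchanged.
    assert(allocationSizeFromSize(24) == 32);
}
```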
| 906 static Address allocateOnHeapIndex(ThreadState*, size_t, int heapIndex, size_t gcInfoIndex); | 916 static Address allocateOnHeapIndex(ThreadState*, size_t, int heapIndex, size_t gcInfoIndex); |
| 907 template<typename T> static Address allocate(size_t); | 917 template<typename T> static Address allocate(size_t, bool eagerlySweep = false); |
| 908 template<typename T> static Address reallocate(void* previous, size_t); | 918 template<typename T> static Address reallocate(void* previous, size_t); |
| 909 | 919 |
| 910 enum GCReason { | 920 enum GCReason { |
| 911 IdleGC, | 921 IdleGC, |
| 912 PreciseGC, | 922 PreciseGC, |
| 913 ConservativeGC, | 923 ConservativeGC, |
| 914 ForcedGC, | 924 ForcedGC, |
| 915 NumberOfGCReason | 925 NumberOfGCReason |
| 916 }; | 926 }; |
| 917 static const char* gcReasonString(GCReason); | 927 static const char* gcReasonString(GCReason); |
| (...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1003 static void remove(PageMemoryRegion*, RegionTree**); | 1013 static void remove(PageMemoryRegion*, RegionTree**); |
| 1004 private: | 1014 private: |
| 1005 PageMemoryRegion* m_region; | 1015 PageMemoryRegion* m_region; |
| 1006 RegionTree* m_left; | 1016 RegionTree* m_left; |
| 1007 RegionTree* m_right; | 1017 RegionTree* m_right; |
| 1008 }; | 1018 }; |
| 1009 | 1019 |
| 1010 // Reset counters that track live and allocated-since-last-GC sizes. | 1020 // Reset counters that track live and allocated-since-last-GC sizes. |
| 1011 static void resetHeapCounters(); | 1021 static void resetHeapCounters(); |
| 1012 | 1022 |
| 1023 static int heapIndexForObjectSize(size_t); | |
| 1024 static bool isNormalHeapIndex(int); | |
| 1025 | |
| 1013 static Visitor* s_markingVisitor; | 1026 static Visitor* s_markingVisitor; |
| 1014 static CallbackStack* s_markingStack; | 1027 static CallbackStack* s_markingStack; |
| 1015 static CallbackStack* s_postMarkingCallbackStack; | 1028 static CallbackStack* s_postMarkingCallbackStack; |
| 1016 static CallbackStack* s_globalWeakCallbackStack; | 1029 static CallbackStack* s_globalWeakCallbackStack; |
| 1017 static CallbackStack* s_ephemeronStack; | 1030 static CallbackStack* s_ephemeronStack; |
| 1018 static HeapDoesNotContainCache* s_heapDoesNotContainCache; | 1031 static HeapDoesNotContainCache* s_heapDoesNotContainCache; |
| 1019 static bool s_shutdownCalled; | 1032 static bool s_shutdownCalled; |
| 1020 static bool s_lastGCWasConservative; | 1033 static bool s_lastGCWasConservative; |
| 1021 static FreePagePool* s_freePagePool; | 1034 static FreePagePool* s_freePagePool; |
| 1022 static OrphanedPagePool* s_orphanedPagePool; | 1035 static OrphanedPagePool* s_orphanedPagePool; |
| 1023 static RegionTree* s_regionTree; | 1036 static RegionTree* s_regionTree; |
| 1024 static size_t s_allocatedSpace; | 1037 static size_t s_allocatedSpace; |
| 1025 static size_t s_allocatedObjectSize; | 1038 static size_t s_allocatedObjectSize; |
| 1026 static size_t s_markedObjectSize; | 1039 static size_t s_markedObjectSize; |
| 1027 static size_t s_estimatedLiveObjectSize; | 1040 static size_t s_estimatedLiveObjectSize; |
| 1028 static size_t s_externalObjectSizeAtLastGC; | 1041 static size_t s_externalObjectSizeAtLastGC; |
| 1029 static double s_estimatedMarkingTimePerByte; | 1042 static double s_estimatedMarkingTimePerByte; |
| 1030 | 1043 |
| 1031 friend class ThreadState; | 1044 friend class ThreadState; |
| 1032 }; | 1045 }; |
| 1033 | 1046 |
| 1047 template<typename T> | |
| 1048 struct IsEagerlyFinalizedType { | |
| 1049 private: | |
| 1050 typedef char YesType; | |
| 1051 struct NoType { | |
| 1052 char padding[8]; | |
| 1053 }; | |
| 1054 | |
| 1055 template <typename U> static YesType checkMarker(typename U::IsEagerlyFinalizedMarker*); | |
| 1056 template <typename U> static NoType checkMarker(...); | |
| 1057 | |
| 1058 public: | |
| 1059 static const bool value = sizeof(checkMarker<T>(nullptr)) == sizeof(YesType); | |
| 1060 }; | |
| 1061 | |
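The patch hoists `IsEagerlyFinalizedType` ahead of `GarbageCollected` so `operator new` can consult it. The detection idiom in isolation: `checkMarker` resolves to the `YesType` overload only when `U` declares the `IsEagerlyFinalizedMarker` typedef (what `EAGERLY_FINALIZE()` expands to under `ENABLE(OILPAN)`); the variadic overload is the worst-match fallback and wins only when the typedef is absent.

```cpp
template<typename T>
struct IsEagerlyFinalizedType {
private:
    typedef char YesType;
    struct NoType { char padding[8]; };

    template<typename U> static YesType checkMarker(typename U::IsEagerlyFinalizedMarker*);
    template<typename U> static NoType checkMarker(...);

public:
    static const bool value = sizeof(checkMarker<T>(nullptr)) == sizeof(YesType);
};

struct Lazy { };
struct Eager { typedef int IsEagerlyFinalizedMarker; }; // as EAGERLY_FINALIZE() would declare

static_assert(!IsEagerlyFinalizedType<Lazy>::value, "no marker means lazily swept");
static_assert(IsEagerlyFinalizedType<Eager>::value, "marker means eagerly swept");
```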
| 1034 template<typename T> class GarbageCollected { | 1062 template<typename T> class GarbageCollected { |
| 1035 WTF_MAKE_NONCOPYABLE(GarbageCollected); | 1063 WTF_MAKE_NONCOPYABLE(GarbageCollected); |
| 1036 | 1064 |
| 1037 // For now direct allocation of arrays on the heap is not allowed. | 1065 // For now direct allocation of arrays on the heap is not allowed. |
| 1038 void* operator new[](size_t size); | 1066 void* operator new[](size_t size); |
| 1039 | 1067 |
| 1040 #if OS(WIN) && COMPILER(MSVC) | 1068 #if OS(WIN) && COMPILER(MSVC) |
| 1041 // Due to some quirkiness in the MSVC compiler we have to provide | 1069 // Due to some quirkiness in the MSVC compiler we have to provide |
| 1042 // the delete[] operator in the GarbageCollected subclasses as it | 1070 // the delete[] operator in the GarbageCollected subclasses as it |
| 1043 // is called when a class is exported in a DLL. | 1071 // is called when a class is exported in a DLL. |
| 1044 protected: | 1072 protected: |
| 1045 void operator delete[](void* p) | 1073 void operator delete[](void* p) |
| 1046 { | 1074 { |
| 1047 ASSERT_NOT_REACHED(); | 1075 ASSERT_NOT_REACHED(); |
| 1048 } | 1076 } |
| 1049 #else | 1077 #else |
| 1050 void operator delete[](void* p); | 1078 void operator delete[](void* p); |
| 1051 #endif | 1079 #endif |
| 1052 | 1080 |
| 1053 public: | 1081 public: |
| 1054 using GarbageCollectedBase = T; | 1082 using GarbageCollectedBase = T; |
| 1055 | 1083 |
| 1056 void* operator new(size_t size) | 1084 void* operator new(size_t size) |
| 1057 { | 1085 { |
| 1058 return allocateObject(size); | 1086 return allocateObject(size, IsEagerlyFinalizedType<T>::value); |
| 1059 } | 1087 } |
| 1060 | 1088 |
| 1061 static void* allocateObject(size_t size) | 1089 static void* allocateObject(size_t size, bool eagerlySweep) |
| 1062 { | 1090 { |
| 1063 return Heap::allocate<T>(size); | 1091 return Heap::allocate<T>(size, eagerlySweep); |
| 1064 } | 1092 } |
| 1065 | 1093 |
| 1066 void operator delete(void* p) | 1094 void operator delete(void* p) |
| 1067 { | 1095 { |
| 1068 ASSERT_NOT_REACHED(); | 1096 ASSERT_NOT_REACHED(); |
| 1069 } | 1097 } |
| 1070 | 1098 |
| 1071 protected: | 1099 protected: |
| 1072 GarbageCollected() | 1100 GarbageCollected() |
| 1073 { | 1101 { |
| 1074 } | 1102 } |
| 1075 }; | 1103 }; |
| 1076 | 1104 |
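How the pieces combine at an allocation site, as a standalone model (`printf`/`malloc` stand in for the real heap; only the routing through `operator new` mirrors the patch): the eager-versus-lazy decision becomes a compile-time property of the class, so call sites just write `new`.

```cpp
#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Primary template: classes are lazily swept unless stated otherwise.
template<typename T>
struct IsEagerlyFinalizedType { static const bool value = false; };

// Stand-in for Heap::allocate<T>(size, eagerlySweep).
void* allocateObject(std::size_t size, bool eagerlySweep)
{
    std::printf("allocating %zu bytes, eagerlySweep=%d\n", size, eagerlySweep);
    return std::malloc(size); // a real GC heap would place and track this
}

template<typename T>
class GarbageCollected {
public:
    void* operator new(std::size_t size)
    {
        // Resolved at compile time, once per class T.
        return allocateObject(size, IsEagerlyFinalizedType<T>::value);
    }
    void operator delete(void*) { } // the GC reclaims; manual delete is a no-op here
};

class Node : public GarbageCollected<Node> { int m_data = 0; };

// Model of what EAGERLY_FINALIZE() inside Node would cause.
template<>
struct IsEagerlyFinalizedType<Node> { static const bool value = true; };

int main()
{
    Node* node = new Node; // prints eagerlySweep=1; the model heap leaks it
    (void)node;
}
```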
| 1077 // Assigning class types to their heaps. | 1105 // Assigning class types to their heaps. |
| 1078 // | 1106 // |
| 1079 // We use sized heaps for most 'normal' objcts to improve memory locality. | 1107 // We use sized heaps for most 'normal' objects to improve memory locality. |
| 1080 // It seems that the same type of objects are likely to be accessed together, | 1108 // It seems that the same type of objects are likely to be accessed together, |
| 1081 // which means that we want to group objects by type. That's one reason | 1109 // which means that we want to group objects by type. That's one reason |
| 1082 // why we provide dedicated heaps for popular types (e.g., Node, CSSValue), | 1110 // why we provide dedicated heaps for popular types (e.g., Node, CSSValue), |
| 1083 // but it's not practical to prepare dedicated heaps for all types. | 1111 // but it's not practical to prepare dedicated heaps for all types. |
| 1084 // Thus we group objects by their sizes, hoping that this will approximately | 1112 // Thus we group objects by their sizes, hoping that this will approximately |
| 1085 // group objects by their types. | 1113 // group objects by their types. |
| 1086 // | 1114 // |
| 1087 // An exception to the use of sized heaps is made for class types that | 1115 // An exception to the use of sized heaps is made for class types that |
| 1088 // require prompt finalization after a garbage collection. That is, their | 1116 // require prompt finalization after a garbage collection. That is, their |
| 1089 // instances have to be finalized early and cannot be delayed until lazy | 1117 // instances have to be finalized early and cannot be delayed until lazy |
| 1090 // sweeping kicks in for their heap and page. The EAGERLY_FINALIZE() | 1118 // sweeping kicks in for their heap and page. The EAGERLY_FINALIZE() |
| 1091 // macro is used to declare a class (and its derived classes) as being | 1119 // macro is used to declare a class (and its derived classes) as being |
| 1092 // in need of eagerly finalized. Must be defined with 'public' visibility | 1120 // in need of eager finalization. Must be defined with 'public' visibility |
| 1093 // for a class. | 1121 // for a class. |
| 1094 // | 1122 // |
| 1095 template<typename T, typename Enabled = void> | 1123 |
| 1096 class HeapIndexTrait { | 1124 inline int Heap::heapIndexForObjectSize(size_t size) |
| 1097 public: | 1125 { |
| 1098 static int heapIndexForObject(size_t size) | 1126 if (size < 64) { |
| 1099 { | 1127 if (size < 32) |
| 1100 if (size < 64) { | 1128 return NormalPage1HeapIndex; |
| 1101 if (size < 32) | 1129 return NormalPage2HeapIndex; |
| 1102 return NormalPage1HeapIndex; | |
| 1103 return NormalPage2HeapIndex; | |
| 1104 } | |
| 1105 if (size < 128) | |
| 1106 return NormalPage3HeapIndex; | |
| 1107 return NormalPage4HeapIndex; | |
| 1108 } | 1130 } |
| 1109 }; | 1131 if (size < 128) |
| 1132 return NormalPage3HeapIndex; | |
| 1133 return NormalPage4HeapIndex; | |
| 1134 } | |
| 1135 | |
| 1136 inline bool Heap::isNormalHeapIndex(int index) | |
| 1137 { | |
| 1138 return index >= NormalPage1HeapIndex && index <= NormalPage4HeapIndex; | |
| 1139 } | |
| 1110 | 1140 |
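A quick standalone check of the bucket boundaries in `heapIndexForObjectSize` (the enum values here are illustrative; the real `NormalPageNHeapIndex` constants live elsewhere in this header):

```cpp
#include <cassert>
#include <cstddef>

enum HeapIndex {
    NormalPage1HeapIndex, // objects of size [0, 32)
    NormalPage2HeapIndex, // [32, 64)
    NormalPage3HeapIndex, // [64, 128)
    NormalPage4HeapIndex, // [128, ...)
};

int heapIndexForObjectSize(std::size_t size)
{
    if (size < 64)
        return size < 32 ? NormalPage1HeapIndex : NormalPage2HeapIndex;
    return size < 128 ? NormalPage3HeapIndex : NormalPage4HeapIndex;
}

int main()
{
    assert(heapIndexForObjectSize(16) == NormalPage1HeapIndex);
    assert(heapIndexForObjectSize(32) == NormalPage2HeapIndex);   // boundary: 32 is not < 32
    assert(heapIndexForObjectSize(127) == NormalPage3HeapIndex);
    assert(heapIndexForObjectSize(4096) == NormalPage4HeapIndex); // everything >= 128
}
```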
| 1111 // TODO(Oilpan): enable this macro when enabling lazy sweeping, non-Oilpan. | 1141 // TODO(Oilpan): enable this macro when enabling lazy sweeping, non-Oilpan. |
| 1112 #if ENABLE(OILPAN) | 1142 #if ENABLE(OILPAN) |
| 1113 #define EAGERLY_FINALIZE() typedef int IsEagerlyFinalizedMarker | 1143 #define EAGERLY_FINALIZE() typedef int IsEagerlyFinalizedMarker |
| 1114 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() | 1144 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() |
| 1115 #else | 1145 #else |
| 1116 #define EAGERLY_FINALIZE() | 1146 #define EAGERLY_FINALIZE() |
| 1117 // TODO(Oilpan): define in terms of Oilpan's EAGERLY_FINALIZE() once lazy | 1147 // TODO(Oilpan): define in terms of Oilpan's EAGERLY_FINALIZE() once lazy |
| 1118 // sweeping is enabled non-Oilpan. | 1148 // sweeping is enabled non-Oilpan. |
| 1119 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() | 1149 #define EAGERLY_FINALIZE_WILL_BE_REMOVED() |
| 1120 #endif | 1150 #endif |
| 1121 | 1151 |
| 1122 template<typename T> | |
| 1123 struct IsEagerlyFinalizedType { | |
| 1124 private: | |
| 1125 typedef char YesType; | |
| 1126 struct NoType { | |
| 1127 char padding[8]; | |
| 1128 }; | |
| 1129 | |
| 1130 template <typename U> static YesType checkMarker(typename U::IsEagerlyFinalizedMarker*); | |
| 1131 template <typename U> static NoType checkMarker(...); | |
| 1132 | |
| 1133 public: | |
| 1134 static const bool value = sizeof(checkMarker<T>(nullptr)) == sizeof(YesType); | |
| 1135 }; | |
| 1136 | |
| 1137 template<typename T> | |
| 1138 class HeapIndexTrait<T, typename WTF::EnableIf<IsEagerlyFinalizedType<T>::value>::Type> { | |
| 1139 public: | |
| 1140 static int heapIndexForObject(size_t) | |
| 1141 { | |
| 1142 return EagerSweepHeapIndex; | |
| 1143 } | |
| 1144 }; | |
| 1145 | |
| 1146 NO_SANITIZE_ADDRESS inline | 1152 NO_SANITIZE_ADDRESS inline |
| 1147 size_t HeapObjectHeader::size() const | 1153 size_t HeapObjectHeader::size() const |
| 1148 { | 1154 { |
| 1149 size_t result = m_encoded & headerSizeMask; | 1155 size_t result = m_encoded & headerSizeMask; |
| 1150 // Large objects should not refer to header->size(). | 1156 // Large objects should not refer to header->size(). |
| 1151 // The actual size of a large object is stored in | 1157 // The actual size of a large object is stored in |
| 1152 // LargeObjectPage::m_payloadSize. | 1158 // LargeObjectPage::m_payloadSize. |
| 1153 ASSERT(result != largeObjectSizeInHeader); | 1159 ASSERT(result != largeObjectSizeInHeader); |
| 1154 ASSERT(!pageFromObject(this)->isLargeObjectPage()); | 1160 ASSERT(!pageFromObject(this)->isLargeObjectPage()); |
| 1155 return result; | 1161 return result; |
| (...skipping 104 matching lines...) | |
| 1260 | 1266 |
| 1261 inline Address Heap::allocateOnHeapIndex(ThreadState* state, size_t size, int heapIndex, size_t gcInfoIndex) | 1267 inline Address Heap::allocateOnHeapIndex(ThreadState* state, size_t size, int heapIndex, size_t gcInfoIndex) |
| 1262 { | 1268 { |
| 1263 ASSERT(state->isAllocationAllowed()); | 1269 ASSERT(state->isAllocationAllowed()); |
| 1264 ASSERT(heapIndex != LargeObjectHeapIndex); | 1270 ASSERT(heapIndex != LargeObjectHeapIndex); |
| 1265 NormalPageHeap* heap = static_cast<NormalPageHeap*>(state->heap(heapIndex)); | 1271 NormalPageHeap* heap = static_cast<NormalPageHeap*>(state->heap(heapIndex)); |
| 1266 return heap->allocateObject(allocationSizeFromSize(size), gcInfoIndex); | 1272 return heap->allocateObject(allocationSizeFromSize(size), gcInfoIndex); |
| 1267 } | 1273 } |
| 1268 | 1274 |
| 1269 template<typename T> | 1275 template<typename T> |
| 1270 Address Heap::allocate(size_t size) | 1276 Address Heap::allocate(size_t size, bool eagerlySweep) |
| 1271 { | 1277 { |
| 1272 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 1278 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
| 1273 return Heap::allocateOnHeapIndex(state, size, HeapIndexTrait<T>::heapIndexForObject(size), GCInfoTrait<T>::index()); | 1279 return Heap::allocateOnHeapIndex(state, size, eagerlySweep ? EagerSweepHeapIndex : Heap::heapIndexForObjectSize(size), GCInfoTrait<T>::index()); |
| 1274 } | 1280 } |
| 1275 | 1281 |
| 1276 template<typename T> | 1282 template<typename T> |
| 1277 Address Heap::reallocate(void* previous, size_t size) | 1283 Address Heap::reallocate(void* previous, size_t size) |
| 1278 { | 1284 { |
| 1285 // Not intended to be a full C realloc() substitute; | |
| 1286 // realloc(nullptr, size) is not a supported alias for malloc(size). | |
| 1287 | |
| 1288 // TODO(sof): promptly free the previous object. | |
| 1279 if (!size) { | 1289 if (!size) { |
| 1280 // If the new size is 0 this is equivalent to either free(previous) or | 1290 // If the new size is 0 this is considered equivalent to free(previous). |
| 1281 // malloc(0). In both cases we do nothing and return nullptr. | |
| 1282 return nullptr; | 1291 return nullptr; |
| 1283 } | 1292 } |
| 1293 | |
| 1284 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 1294 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
| 1285 // TODO(haraken): reallocate() should use the heap that the original object | |
| 1286 // is using. This won't be a big deal since reallocate() is rarely used. | |
| 1287 Address address = Heap::allocateOnHeapIndex(state, size, HeapIndexTrait<T>::heapIndexForObject(size), GCInfoTrait<T>::index()); | |
| 1288 if (!previous) { | |
| 1289 // This is equivalent to malloc(size). | |
| 1290 return address; | |
| 1291 } | |
| 1292 HeapObjectHeader* previousHeader = HeapObjectHeader::fromPayload(previous); | 1295 HeapObjectHeader* previousHeader = HeapObjectHeader::fromPayload(previous); |
| 1296 BasePage* page = pageFromObject(previousHeader); | |
| 1297 ASSERT(page); | |
| 1298 int heapIndex = page->heap()->heapIndex(); | |

> **haraken** 2015/05/28 12:30:04: I'd add ASSERT(heapIndex != EagerSweepHeapIndex) /
>
> **sof** 2015/05/28 12:51:39: That seems like a random restriction to make; what
| 1299 // Recompute the effective heap index if previous allocation | |
| 1300 // was on the normal heaps or a large object. | |
| 1301 if (isNormalHeapIndex(heapIndex) || heapIndex == LargeObjectHeapIndex) | |
| 1302 heapIndex = heapIndexForObjectSize(size); | |
| 1303 | |
| 1293 // TODO(haraken): We don't support reallocate() for finalizable objects. | 1304 // TODO(haraken): We don't support reallocate() for finalizable objects. |
| 1294 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer()); | 1305 ASSERT(!Heap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer()); |
| 1295 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index()); | 1306 ASSERT(previousHeader->gcInfoIndex() == GCInfoTrait<T>::index()); |
| 1307 Address address = Heap::allocateOnHeapIndex(state, size, heapIndex, GCInfoTrait<T>::index()); | |
| 1296 size_t copySize = previousHeader->payloadSize(); | 1308 size_t copySize = previousHeader->payloadSize(); |
| 1297 if (copySize > size) | 1309 if (copySize > size) |
| 1298 copySize = size; | 1310 copySize = size; |
| 1299 memcpy(address, previous, copySize); | 1311 memcpy(address, previous, copySize); |
| 1300 return address; | 1312 return address; |
| 1301 } | 1313 } |
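A behavioral model of the revised `reallocate`, under stated assumptions: `malloc` stands in for `allocateOnHeapIndex`, and the `previousSize` parameter stands in for `previousHeader->payloadSize()`. Two properties from the diff are the point: size 0 returns nullptr and is treated as a free (the old object is simply left for the GC; the TODO notes prompt freeing as future work), and the copy length is the minimum of the old payload size and the new size. Unlike C `realloc`, `previous == nullptr` is not a supported `malloc` alias.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>

void* modelReallocate(void* previous, std::size_t previousSize, std::size_t size)
{
    if (!size)
        return nullptr; // treated as a free; the GC reclaims `previous` later

    void* address = std::malloc(size); // stand-in for allocateOnHeapIndex
    std::size_t copySize = previousSize < size ? previousSize : size;
    std::memcpy(address, previous, copySize);
    return address;
}

int main()
{
    char* p = static_cast<char*>(std::malloc(8));
    std::memcpy(p, "payload", 8);

    // Shrinking copies only the new, smaller size (4 bytes here).
    char* q = static_cast<char*>(modelReallocate(p, 8, 4));
    assert(std::memcmp(q, "payl", 4) == 0);

    // Size 0 frees rather than acting like malloc(0).
    assert(modelReallocate(q, 4, 0) == nullptr);
}
```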
| 1302 | 1314 |
| 1303 } // namespace blink | 1315 } // namespace blink |
| 1304 | 1316 |
| 1305 #endif // Heap_h | 1317 #endif // Heap_h |