| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 1086 matching lines...) |
| 1097 { | 1097 { |
| 1098 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(payload()); | 1098 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(payload()); |
| 1099 return header->isFree() && header->size() == payloadSize(); | 1099 return header->isFree() && header->size() == payloadSize(); |
| 1100 } | 1100 } |
| 1101 | 1101 |
| 1102 void NormalPage::removeFromHeap() | 1102 void NormalPage::removeFromHeap() |
| 1103 { | 1103 { |
| 1104 heapForNormalPage()->freePage(this); | 1104 heapForNormalPage()->freePage(this); |
| 1105 } | 1105 } |
| 1106 | 1106 |
| 1107 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
| 1108 static void discardPages(Address begin, Address end) |
| 1109 { |
| 1110 uintptr_t beginAddress = WTF::roundUpToSystemPage(reinterpret_cast<uintptr_t>(begin)); |
| 1111 uintptr_t endAddress = WTF::roundDownToSystemPage(reinterpret_cast<uintptr_t>(end)); |
| 1112 if (beginAddress < endAddress) |
| 1113 WTF::discardSystemPages(reinterpret_cast<void*>(beginAddress), endAddress - beginAddress); |
| 1114 } |
| 1115 #endif |
| 1116 |
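The new discardPages helper trims the given range inward to whole system pages before asking the OS to drop their backing memory, so the partial pages at either edge (which may still hold live heap data or a free-list entry header) are never touched. A minimal sketch of what this amounts to, assuming a POSIX target where WTF::discardSystemPages is roughly an madvise(MADV_DONTNEED) call (the real WTF helpers may differ per platform):

    // Illustrative sketch only, not the WTF implementation.
    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdint>

    static void discardPagesSketch(uintptr_t begin, uintptr_t end)
    {
        const uintptr_t pageSize = static_cast<uintptr_t>(sysconf(_SC_PAGESIZE));
        uintptr_t beginAddress = (begin + pageSize - 1) & ~(pageSize - 1); // round up to a page boundary
        uintptr_t endAddress = end & ~(pageSize - 1);                      // round down to a page boundary
        // Only pages lying entirely inside [begin, end) may be discarded;
        // the partial pages at the edges still hold live data.
        if (beginAddress < endAddress)
            madvise(reinterpret_cast<void*>(beginAddress), endAddress - beginAddress, MADV_DONTNEED);
    }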
| 1107 void NormalPage::sweep() | 1117 void NormalPage::sweep() |
| 1108 { | 1118 { |
| 1109 size_t markedObjectSize = 0; | 1119 size_t markedObjectSize = 0; |
| 1110 Address startOfGap = payload(); | 1120 Address startOfGap = payload(); |
| 1111 for (Address headerAddress = startOfGap; headerAddress < payloadEnd(); ) { | 1121 for (Address headerAddress = startOfGap; headerAddress < payloadEnd(); ) { |
| 1112 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1122 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 1113 ASSERT(header->size() > 0); | 1123 size_t size = header->size(); |
| 1114 ASSERT(header->size() < blinkPagePayloadSize()); | 1124 ASSERT(size > 0); |
| 1125 ASSERT(size < blinkPagePayloadSize()); |
| 1115 | 1126 |
| 1116 if (header->isPromptlyFreed()) | 1127 if (header->isPromptlyFreed()) |
| 1117 heapForNormalPage()->decreasePromptlyFreedSize(header->size()); | 1128 heapForNormalPage()->decreasePromptlyFreedSize(size); |
| 1118 if (header->isFree()) { | 1129 if (header->isFree()) { |
| 1119 size_t size = header->size(); | |
| 1120 // Zero the memory in the free list header to maintain the | 1130 // Zero the memory in the free list header to maintain the |
| 1121 // invariant that memory on the free list is zero filled. | 1131 // invariant that memory on the free list is zero filled. |
| 1122 // The rest of the memory is already on the free list and is | 1132 // The rest of the memory is already on the free list and is |
| 1123 // therefore already zero filled. | 1133 // therefore already zero filled. |
| 1124 SET_MEMORY_INACCESSIBLE(headerAddress, size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry)); | 1134 SET_MEMORY_INACCESSIBLE(headerAddress, size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry)); |
| 1125 CHECK_MEMORY_INACCESSIBLE(headerAddress, size); | 1135 CHECK_MEMORY_INACCESSIBLE(headerAddress, size); |
| 1126 headerAddress += size; | 1136 headerAddress += size; |
| 1127 continue; | 1137 continue; |
| 1128 } | 1138 } |
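The branch above re-zeroes only the first sizeof(FreeListEntry) bytes of an already-free block: a free-list entry overlays the start of the block and is the only part that can hold non-zero data (its header and link), while the tail was zero-filled when the block was freed. A hedged sketch of that invariant with illustrative names, not the actual Blink definitions (in sanitizer builds SET_MEMORY_INACCESSIBLE additionally poisons the range):

    #include <cstddef>
    #include <cstring>

    // Assumed shape: the free-list entry overlays the free block and holds
    // only a header word and a link pointer; everything after it is zero.
    struct FreeListEntrySketch {
        size_t encodedSize;
        FreeListEntrySketch* next;
    };

    // Re-zero just the prefix the free-list entry may have dirtied;
    // blocks smaller than an entry are zeroed in full.
    inline void rezeroFreeBlockPrefix(char* headerAddress, size_t size)
    {
        memset(headerAddress, 0,
               size < sizeof(FreeListEntrySketch) ? size : sizeof(FreeListEntrySketch));
    }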
| 1129 ASSERT(header->checkHeader()); | 1139 ASSERT(header->checkHeader()); |
| 1130 | 1140 |
| 1131 if (!header->isMarked()) { | 1141 if (!header->isMarked()) { |
| 1132 size_t size = header->size(); | |
| 1133 // This is a fast version of header->payloadSize(). | 1142 // This is a fast version of header->payloadSize(). |
| 1134 size_t payloadSize = size - sizeof(HeapObjectHeader); | 1143 size_t payloadSize = size - sizeof(HeapObjectHeader); |
| 1135 Address payload = header->payload(); | 1144 Address payload = header->payload(); |
| 1136 // For ASan, unpoison the object before calling the finalizer. The | 1145 // For ASan, unpoison the object before calling the finalizer. The |
| 1137 // finalized object will be zero-filled and poisoned afterwards. | 1146 // finalized object will be zero-filled and poisoned afterwards. |
| 1138 // Given all other unmarked objects are poisoned, ASan will detect | 1147 // Given all other unmarked objects are poisoned, ASan will detect |
| 1139 // an error if the finalizer touches any other on-heap object that | 1148 // an error if the finalizer touches any other on-heap object that |
| 1140 // dies at the same GC cycle. | 1149 // dies at the same GC cycle. |
| 1141 ASAN_UNPOISON_MEMORY_REGION(payload, payloadSize); | 1150 ASAN_UNPOISON_MEMORY_REGION(payload, payloadSize); |
| 1142 header->finalize(payload, payloadSize); | 1151 header->finalize(payload, payloadSize); |
| 1143 // This memory will be added to the freelist. Maintain the invariant | 1152 // This memory will be added to the freelist. Maintain the invariant |
| 1144 // that memory on the freelist is zero filled. | 1153 // that memory on the freelist is zero filled. |
| 1145 SET_MEMORY_INACCESSIBLE(headerAddress, size); | 1154 SET_MEMORY_INACCESSIBLE(headerAddress, size); |
| 1146 headerAddress += size; | 1155 headerAddress += size; |
| 1147 continue; | 1156 continue; |
| 1148 } | 1157 } |
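Because every unmarked object on the page was poisoned before sweeping, unpoisoning only the object being finalized means any stray access from its finalizer into another dead object is caught by ASan. A minimal sketch of that poison/unpoison discipline, spelled out with the raw sanitizer interface for illustration (the Blink macros compile to no-ops when ASan is off, and the zero-fill plus re-poison here is done by SET_MEMORY_INACCESSIBLE):

    #include <sanitizer/asan_interface.h>
    #include <cstddef>
    #include <cstring>

    // finalizeFn is a stand-in for HeapObjectHeader::finalize().
    void finalizeDeadObjectSketch(void* payload, size_t payloadSize,
                                  void (*finalizeFn)(void*, size_t))
    {
        __asan_unpoison_memory_region(payload, payloadSize); // make only this object accessible
        finalizeFn(payload, payloadSize);                    // run the destructor/finalizer
        memset(payload, 0, payloadSize);                     // keep freelist memory zero-filled
        __asan_poison_memory_region(payload, payloadSize);   // poison again until reallocation
    }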
| 1149 if (startOfGap != headerAddress) | 1158 if (startOfGap != headerAddress) { |
| 1150 heapForNormalPage()->addToFreeList(startOfGap, headerAddress - startOfGap); | 1159 heapForNormalPage()->addToFreeList(startOfGap, headerAddress - startOfGap); |
| 1160 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
| 1161 // Discarding pages increases page faults and may regress performance. |
| 1162 // So we enable this only on low-RAM devices. |
| 1163 if (Heap::isLowEndDevice()) |
| 1164 discardPages(startOfGap + sizeof(FreeListEntry), headerAddress); |
| 1165 #endif |
| 1166 } |
| 1151 header->unmark(); | 1167 header->unmark(); |
| 1152 headerAddress += header->size(); | 1168 headerAddress += size; |
| 1153 markedObjectSize += header->size(); | 1169 markedObjectSize += size; |
| 1154 startOfGap = headerAddress; | 1170 startOfGap = headerAddress; |
| 1155 } | 1171 } |
| 1156 if (startOfGap != payloadEnd()) | 1172 if (startOfGap != payloadEnd()) { |
| 1157 heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap); | 1173 heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap); |
| 1174 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
| 1175 if (Heap::isLowEndDevice()) |
| 1176 discardPages(startOfGap + sizeof(FreeListEntry), payloadEnd()); |
| 1177 #endif |
| 1178 } |
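Note that both discard calls start at startOfGap + sizeof(FreeListEntry): the FreeListEntry that addToFreeList just wrote at the head of the gap must stay resident, so only the zero-filled tail of the gap is handed back to the OS. A hypothetical worked example of which pages that releases (4 KiB system pages and a 16-byte FreeListEntry are assumed here):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uintptr_t pageSize = 0x1000, freeListEntrySize = 16; // assumed sizes
        uintptr_t startOfGap = 0x10010;               // hypothetical 20 KB gap
        uintptr_t endOfGap = startOfGap + 20 * 1024;  // 0x15010
        // Round the start (past the resident FreeListEntry) up, and the end down.
        uintptr_t begin = (startOfGap + freeListEntrySize + pageSize - 1) & ~(pageSize - 1); // 0x11000
        uintptr_t end = endOfGap & ~(pageSize - 1);                                          // 0x15000
        // Four whole pages are discarded; the entry header and the partial
        // tail page remain mapped.
        std::printf("discard [%#lx, %#lx): %lu pages\n",
                    static_cast<unsigned long>(begin), static_cast<unsigned long>(end),
                    static_cast<unsigned long>((end - begin) / pageSize));
    }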
| 1158 | 1179 |
| 1159 if (markedObjectSize) | 1180 if (markedObjectSize) |
| 1160 Heap::increaseMarkedObjectSize(markedObjectSize); | 1181 Heap::increaseMarkedObjectSize(markedObjectSize); |
| 1161 } | 1182 } |
| 1162 | 1183 |
| 1163 void NormalPage::makeConsistentForGC() | 1184 void NormalPage::makeConsistentForGC() |
| 1164 { | 1185 { |
| 1165 size_t markedObjectSize = 0; | 1186 size_t markedObjectSize = 0; |
| 1166 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | 1187 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
| 1167 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1188 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| (...skipping 406 matching lines...) |
| 1574 | 1595 |
| 1575 m_hasEntries = true; | 1596 m_hasEntries = true; |
| 1576 size_t index = hash(address); | 1597 size_t index = hash(address); |
| 1577 ASSERT(!(index & 1)); | 1598 ASSERT(!(index & 1)); |
| 1578 Address cachePage = roundToBlinkPageStart(address); | 1599 Address cachePage = roundToBlinkPageStart(address); |
| 1579 m_entries[index + 1] = m_entries[index]; | 1600 m_entries[index + 1] = m_entries[index]; |
| 1580 m_entries[index] = cachePage; | 1601 m_entries[index] = cachePage; |
| 1581 } | 1602 } |
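The cache update in the last hunk treats each pair of slots (index, index + 1) as one bucket: hash() always returns an even index, the newest page start goes into the even slot, and the previous occupant is demoted into the odd slot, giving a tiny two-way, most-recently-used-first cache. A hedged sketch of that scheme with illustrative sizes (the real table size, hash function, and blink page size may differ):

    #include <cstddef>
    #include <cstdint>

    static const size_t kNumEntries = 1024;          // assumed table size (must be even)
    static const uintptr_t kBlinkPageSize = 1 << 17; // assumed 128 KiB blink pages
    static uintptr_t entries[kNumEntries];

    static size_t hashToBucket(uintptr_t address)
    {
        // Always even, so index and index + 1 form one two-entry bucket.
        return ((address / kBlinkPageSize) % (kNumEntries / 2)) * 2;
    }

    static void addPageToCache(uintptr_t address)
    {
        size_t index = hashToBucket(address);
        uintptr_t cachePage = address & ~(kBlinkPageSize - 1); // roundToBlinkPageStart
        entries[index + 1] = entries[index]; // demote the previous most-recent entry
        entries[index] = cachePage;          // newest entry goes first in the bucket
    }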
| 1582 | 1603 |
| 1583 } // namespace blink | 1604 } // namespace blink |