Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 527 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 538 if (startOfGap != headerAddress) | 538 if (startOfGap != headerAddress) |
| 539 addToFreeList(startOfGap, headerAddress - startOfGap); | 539 addToFreeList(startOfGap, headerAddress - startOfGap); |
| 540 | 540 |
| 541 headerAddress += size; | 541 headerAddress += size; |
| 542 startOfGap = headerAddress; | 542 startOfGap = headerAddress; |
| 543 } | 543 } |
| 544 | 544 |
| 545 if (startOfGap != page->payloadEnd()) | 545 if (startOfGap != page->payloadEnd()) |
| 546 addToFreeList(startOfGap, page->payloadEnd() - startOfGap); | 546 addToFreeList(startOfGap, page->payloadEnd() - startOfGap); |
| 547 } | 547 } |
| 548 Heap::decreaseAllocatedObjectSize(freedSize); | 548 threadState()->decreaseAllocatedObjectSize(freedSize); |
| 549 ASSERT(m_promptlyFreedSize == freedSize); | 549 ASSERT(m_promptlyFreedSize == freedSize); |
| 550 m_promptlyFreedSize = 0; | 550 m_promptlyFreedSize = 0; |
| 551 return true; | 551 return true; |
| 552 } | 552 } |
| 553 | 553 |
| 554 void NormalPageHeap::promptlyFreeObject(HeapObjectHeader* header) | 554 void NormalPageHeap::promptlyFreeObject(HeapObjectHeader* header) |
| 555 { | 555 { |
| 556 ASSERT(!threadState()->sweepForbidden()); | 556 ASSERT(!threadState()->sweepForbidden()); |
| 557 ASSERT(header->checkHeader()); | 557 ASSERT(header->checkHeader()); |
| 558 Address address = reinterpret_cast<Address>(header); | 558 Address address = reinterpret_cast<Address>(header); |
| (...skipping 97 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 656 } | 656 } |
| 657 | 657 |
| 658 void NormalPageHeap::setRemainingAllocationSize(size_t newRemainingAllocationSize) | 658 void NormalPageHeap::setRemainingAllocationSize(size_t newRemainingAllocationSize) |
| 659 { | 659 { |
| 660 m_remainingAllocationSize = newRemainingAllocationSize; | 660 m_remainingAllocationSize = newRemainingAllocationSize; |
| 661 | 661 |
| 662 // Sync recorded allocated-object size: | 662 // Sync recorded allocated-object size: |
| 663 // - if previous alloc checkpoint is larger, allocation size has increased. | 663 // - if previous alloc checkpoint is larger, allocation size has increased. |
| 664 // - if smaller, a net reduction in size since last call to updateRemainingAllocationSize(). | 664 // - if smaller, a net reduction in size since last call to updateRemainingAllocationSize(). |
| 665 if (m_lastRemainingAllocationSize > m_remainingAllocationSize) | 665 if (m_lastRemainingAllocationSize > m_remainingAllocationSize) |
| 666 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - m_remainingAllocationSize); | 666 threadState()->increaseAllocatedObjectSize(m_lastRemainingAllocationSize - m_remainingAllocationSize); |
| 667 else if (m_lastRemainingAllocationSize != m_remainingAllocationSize) | 667 else if (m_lastRemainingAllocationSize != m_remainingAllocationSize) |
| 668 Heap::decreaseAllocatedObjectSize(m_remainingAllocationSize - m_lastRemainingAllocationSize); | 668 threadState()->decreaseAllocatedObjectSize(m_remainingAllocationSize - m_lastRemainingAllocationSize); |
| 669 m_lastRemainingAllocationSize = m_remainingAllocationSize; | 669 m_lastRemainingAllocationSize = m_remainingAllocationSize; |
| 670 } | 670 } |
| 671 | 671 |
| 672 void NormalPageHeap::updateRemainingAllocationSize() | 672 void NormalPageHeap::updateRemainingAllocationSize() |
| 673 { | 673 { |
| 674 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { | 674 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { |
| 675 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); | 675 threadState()->increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); |
| 676 m_lastRemainingAllocationSize = remainingAllocationSize(); | 676 m_lastRemainingAllocationSize = remainingAllocationSize(); |
| 677 } | 677 } |
| 678 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); | 678 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); |
| 679 } | 679 } |
| 680 | 680 |
| 681 void NormalPageHeap::setAllocationPoint(Address point, size_t size) | 681 void NormalPageHeap::setAllocationPoint(Address point, size_t size) |
| 682 { | 682 { |
| 683 #if ENABLE(ASSERT) | 683 #if ENABLE(ASSERT) |
| 684 if (point) { | 684 if (point) { |
| 685 ASSERT(size); | 685 ASSERT(size); |
| (...skipping 146 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 832 LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(pageMemory, this, allocationSize); | 832 LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(pageMemory, this, allocationSize); |
| 833 ASSERT(header->checkHeader()); | 833 ASSERT(header->checkHeader()); |
| 834 | 834 |
| 835 // Poison the object header and allocationGranularity bytes after the object | 835 // Poison the object header and allocationGranularity bytes after the object |
| 836 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); | 836 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); |
| 837 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); | 837 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); |
| 838 | 838 |
| 839 largeObject->link(&m_firstPage); | 839 largeObject->link(&m_firstPage); |
| 840 | 840 |
| 841 Heap::increaseAllocatedSpace(largeObject->size()); | 841 Heap::increaseAllocatedSpace(largeObject->size()); |
| 842 Heap::increaseAllocatedObjectSize(largeObject->size()); | 842 threadState()->increaseAllocatedObjectSize(largeObject->size()); |
| 843 return result; | 843 return result; |
| 844 } | 844 } |
| 845 | 845 |
| 846 void LargeObjectHeap::freeLargeObjectPage(LargeObjectPage* object) | 846 void LargeObjectHeap::freeLargeObjectPage(LargeObjectPage* object) |
| 847 { | 847 { |
| 848 ASAN_UNPOISON_MEMORY_REGION(object->payload(), object->payloadSize()); | 848 ASAN_UNPOISON_MEMORY_REGION(object->payload(), object->payloadSize()); |
| 849 object->heapObjectHeader()->finalize(object->payload(), object->payloadSize()); | 849 object->heapObjectHeader()->finalize(object->payload(), object->payloadSize()); |
| 850 Heap::decreaseAllocatedSpace(object->size()); | 850 Heap::decreaseAllocatedSpace(object->size()); |
| 851 | 851 |
| 852 // Unpoison the object header and allocationGranularity bytes after the | 852 // Unpoison the object header and allocationGranularity bytes after the |
| (...skipping 325 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1178 } | 1178 } |
| 1179 if (startOfGap != payloadEnd()) { | 1179 if (startOfGap != payloadEnd()) { |
| 1180 heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap); | 1180 heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap); |
| 1181 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) | 1181 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
| 1182 if (Heap::isLowEndDevice()) | 1182 if (Heap::isLowEndDevice()) |
| 1183 discardPages(startOfGap + sizeof(FreeListEntry), payloadEnd()); | 1183 discardPages(startOfGap + sizeof(FreeListEntry), payloadEnd()); |
| 1184 #endif | 1184 #endif |
| 1185 } | 1185 } |
| 1186 | 1186 |
| 1187 if (markedObjectSize) | 1187 if (markedObjectSize) |
| 1188 Heap::increaseMarkedObjectSize(markedObjectSize); | 1188 heapForNormalPage()->threadState()->increaseMarkedObjectSize(markedObjectSize);
|
sof
2016/02/16 09:14:38
(I think it would be worth it to manually CSE the heapForNormalPage() call here.)
peria
2016/02/17 02:31:26
Done.
| |
| 1189 } | 1189 } |
| 1190 | 1190 |
| 1191 void NormalPage::makeConsistentForGC() | 1191 void NormalPage::makeConsistentForGC() |
| 1192 { | 1192 { |
| 1193 size_t markedObjectSize = 0; | 1193 size_t markedObjectSize = 0; |
| 1194 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | 1194 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
| 1195 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1195 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 1196 ASSERT(header->size() < blinkPagePayloadSize()); | 1196 ASSERT(header->size() < blinkPagePayloadSize()); |
| 1197 // Check if a free list entry first since we cannot call | 1197 // Check if a free list entry first since we cannot call |
| 1198 // isMarked on a free list entry. | 1198 // isMarked on a free list entry. |
| 1199 if (header->isFree()) { | 1199 if (header->isFree()) { |
| 1200 headerAddress += header->size(); | 1200 headerAddress += header->size(); |
| 1201 continue; | 1201 continue; |
| 1202 } | 1202 } |
| 1203 ASSERT(header->checkHeader()); | 1203 ASSERT(header->checkHeader()); |
| 1204 if (header->isMarked()) { | 1204 if (header->isMarked()) { |
| 1205 header->unmark(); | 1205 header->unmark(); |
| 1206 markedObjectSize += header->size(); | 1206 markedObjectSize += header->size(); |
| 1207 } else { | 1207 } else { |
| 1208 header->markDead(); | 1208 header->markDead(); |
| 1209 } | 1209 } |
| 1210 headerAddress += header->size(); | 1210 headerAddress += header->size(); |
| 1211 } | 1211 } |
| 1212 if (markedObjectSize) | 1212 if (markedObjectSize) |
| 1213 Heap::increaseMarkedObjectSize(markedObjectSize); | 1213 heapForNormalPage()->threadState()->increaseMarkedObjectSize(markedObjectSize); |
| 1214 } | 1214 } |
| 1215 | 1215 |
| 1216 void NormalPage::makeConsistentForMutator() | 1216 void NormalPage::makeConsistentForMutator() |
| 1217 { | 1217 { |
| 1218 Address startOfGap = payload(); | 1218 Address startOfGap = payload(); |
| 1219 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | 1219 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
| 1220 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1220 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 1221 size_t size = header->size(); | 1221 size_t size = header->size(); |
| 1222 ASSERT(size < blinkPagePayloadSize()); | 1222 ASSERT(size < blinkPagePayloadSize()); |
| 1223 if (header->isPromptlyFreed()) | 1223 if (header->isPromptlyFreed()) |
| (...skipping 246 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1470 } | 1470 } |
| 1471 | 1471 |
| 1472 void LargeObjectPage::removeFromHeap() | 1472 void LargeObjectPage::removeFromHeap() |
| 1473 { | 1473 { |
| 1474 static_cast<LargeObjectHeap*>(heap())->freeLargeObjectPage(this); | 1474 static_cast<LargeObjectHeap*>(heap())->freeLargeObjectPage(this); |
| 1475 } | 1475 } |
| 1476 | 1476 |
| 1477 void LargeObjectPage::sweep() | 1477 void LargeObjectPage::sweep() |
| 1478 { | 1478 { |
| 1479 heapObjectHeader()->unmark(); | 1479 heapObjectHeader()->unmark(); |
| 1480 Heap::increaseMarkedObjectSize(size()); | 1480 heap()->threadState()->increaseMarkedObjectSize(size()); |
| 1481 } | 1481 } |
| 1482 | 1482 |
| 1483 void LargeObjectPage::makeConsistentForGC() | 1483 void LargeObjectPage::makeConsistentForGC() |
| 1484 { | 1484 { |
| 1485 HeapObjectHeader* header = heapObjectHeader(); | 1485 HeapObjectHeader* header = heapObjectHeader(); |
| 1486 if (header->isMarked()) { | 1486 if (header->isMarked()) { |
| 1487 header->unmark(); | 1487 header->unmark(); |
| 1488 Heap::increaseMarkedObjectSize(size()); | 1488 heap()->threadState()->increaseMarkedObjectSize(size()); |
| 1489 } else { | 1489 } else { |
| 1490 header->markDead(); | 1490 header->markDead(); |
| 1491 } | 1491 } |
| 1492 } | 1492 } |
| 1493 | 1493 |
| 1494 void LargeObjectPage::makeConsistentForMutator() | 1494 void LargeObjectPage::makeConsistentForMutator() |
| 1495 { | 1495 { |
| 1496 HeapObjectHeader* header = heapObjectHeader(); | 1496 HeapObjectHeader* header = heapObjectHeader(); |
| 1497 if (header->isMarked()) | 1497 if (header->isMarked()) |
| 1498 header->unmark(); | 1498 header->unmark(); |
| (...skipping 103 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1602 | 1602 |
| 1603 m_hasEntries = true; | 1603 m_hasEntries = true; |
| 1604 size_t index = hash(address); | 1604 size_t index = hash(address); |
| 1605 ASSERT(!(index & 1)); | 1605 ASSERT(!(index & 1)); |
| 1606 Address cachePage = roundToBlinkPageStart(address); | 1606 Address cachePage = roundToBlinkPageStart(address); |
| 1607 m_entries[index + 1] = m_entries[index]; | 1607 m_entries[index + 1] = m_entries[index]; |
| 1608 m_entries[index] = cachePage; | 1608 m_entries[index] = cachePage; |
| 1609 } | 1609 } |
| 1610 | 1610 |
| 1611 } // namespace blink | 1611 } // namespace blink |
| OLD | NEW |