| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 553 matching lines...) |
| 564 size_t size = header->size(); | 564 size_t size = header->size(); |
| 565 size_t payloadSize = header->payloadSize(); | 565 size_t payloadSize = header->payloadSize(); |
| 566 ASSERT(size > 0); | 566 ASSERT(size > 0); |
| 567 ASSERT(pageFromObject(address) == findPageFromAddress(address)); | 567 ASSERT(pageFromObject(address) == findPageFromAddress(address)); |
| 568 | 568 |
| 569 { | 569 { |
| 570 ThreadState::SweepForbiddenScope forbiddenScope(threadState()); | 570 ThreadState::SweepForbiddenScope forbiddenScope(threadState()); |
| 571 header->finalize(payload, payloadSize); | 571 header->finalize(payload, payloadSize); |
| 572 if (address + size == m_currentAllocationPoint) { | 572 if (address + size == m_currentAllocationPoint) { |
| 573 m_currentAllocationPoint = address; | 573 m_currentAllocationPoint = address; |
| 574 m_remainingAllocationSize += size; | 574 setRemainingAllocationSize(m_remainingAllocationSize + size); |
| 575 // Sync recorded allocated-object size: | |
| 576 // - if previous alloc checkpoint is larger, allocation size has increased. | |
| 577 // - if smaller, a net reduction in size since last call to updateRemainingAllocationSize(). | |
| 578 if (m_lastRemainingAllocationSize > m_remainingAllocationSize) | |
| 579 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - m_remainingAllocationSize); | |
| 580 else if (m_lastRemainingAllocationSize != m_remainingAllocationSize) | |
| 581 Heap::decreaseAllocatedObjectSize(m_remainingAllocationSize - m_lastRemainingAllocationSize); | |
| 582 m_lastRemainingAllocationSize = m_remainingAllocationSize; | |
| 583 SET_MEMORY_INACCESSIBLE(address, size); | 575 SET_MEMORY_INACCESSIBLE(address, size); |
| 584 return; | 576 return; |
| 585 } | 577 } |
| 586 SET_MEMORY_INACCESSIBLE(payload, payloadSize); | 578 SET_MEMORY_INACCESSIBLE(payload, payloadSize); |
| 587 header->markPromptlyFreed(); | 579 header->markPromptlyFreed(); |
| 588 } | 580 } |
| 589 | 581 |
| 590 m_promptlyFreedSize += size; | 582 m_promptlyFreedSize += size; |
| 591 } | 583 } |
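The early return in promptlyFreeObject() above only fires when the freed object is the one most recently carved off the bump-allocation point, so freeing it amounts to rewinding the pointer instead of handing the payload to the free list. A minimal standalone sketch of that fast path, assuming a simple bump allocator (the `BumpRegion` type and its members are hypothetical, not Blink API):

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical illustration of the "promptly free" fast path: if the freed
// block ends exactly at the current bump-allocation point, rewinding the
// pointer reclaims it immediately; otherwise it is left for later sweeping.
struct BumpRegion {
    uint8_t* allocationPoint;   // next free byte
    size_t remaining;           // bytes left in the region

    uint8_t* allocate(size_t size)
    {
        if (size > remaining)
            return nullptr;
        uint8_t* result = allocationPoint;
        allocationPoint += size;
        remaining -= size;
        return result;
    }

    // Returns true if the block could be reclaimed by rewinding.
    bool promptlyFree(uint8_t* address, size_t size)
    {
        if (address + size != allocationPoint)
            return false;       // not the last allocation; leave it for sweeping
        allocationPoint = address;
        remaining += size;
        return true;
    }
};
```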
| 592 | 584 |
| 593 bool NormalPageHeap::expandObject(HeapObjectHeader* header, size_t newSize) | 585 bool NormalPageHeap::expandObject(HeapObjectHeader* header, size_t newSize) |
| 594 { | 586 { |
| 595 // It's possible that Vector requests a smaller expanded size because | 587 // It's possible that Vector requests a smaller expanded size because |
| 596 // Vector::shrinkCapacity can set a capacity smaller than the actual payload | 588 // Vector::shrinkCapacity can set a capacity smaller than the actual payload |
| 597 // size. | 589 // size. |
| 598 ASSERT(header->checkHeader()); | 590 ASSERT(header->checkHeader()); |
| 599 if (header->payloadSize() >= newSize) | 591 if (header->payloadSize() >= newSize) |
| 600 return true; | 592 return true; |
| 601 size_t allocationSize = Heap::allocationSizeFromSize(newSize); | 593 size_t allocationSize = Heap::allocationSizeFromSize(newSize); |
| 602 ASSERT(allocationSize > header->size()); | 594 ASSERT(allocationSize > header->size()); |
| 603 size_t expandSize = allocationSize - header->size(); | 595 size_t expandSize = allocationSize - header->size(); |
| 604 if (isObjectAllocatedAtAllocationPoint(header) && expandSize <= m_remainingAllocationSize) { | 596 if (isObjectAllocatedAtAllocationPoint(header) && expandSize <= m_remainingAllocationSize) { |
| 605 m_currentAllocationPoint += expandSize; | 597 m_currentAllocationPoint += expandSize; |
| 606 m_remainingAllocationSize -= expandSize; | 598 ASSERT(m_remainingAllocationSize >= expandSize); |
| 607 | 599 setRemainingAllocationSize(m_remainingAllocationSize - expandSize); |
| 608 // Unpoison the memory used for the object (payload). | 600 // Unpoison the memory used for the object (payload). |
| 609 SET_MEMORY_ACCESSIBLE(header->payloadEnd(), expandSize); | 601 SET_MEMORY_ACCESSIBLE(header->payloadEnd(), expandSize); |
| 610 header->setSize(allocationSize); | 602 header->setSize(allocationSize); |
| 611 ASSERT(findPageFromAddress(header->payloadEnd() - 1)); | 603 ASSERT(findPageFromAddress(header->payloadEnd() - 1)); |
| 612 return true; | 604 return true; |
| 613 } | 605 } |
| 614 return false; | 606 return false; |
| 615 } | 607 } |
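expandObject() follows the same principle: only the object that sits directly at the allocation point can grow in place, and only if the growth fits into the remaining bump space. A hedged, self-contained sketch of that check (names are illustrative, not Blink API):

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical illustration: an object can only be expanded in place when it
// ends exactly at the bump-allocation point and the growth fits into the
// remaining bump space; otherwise the caller must allocate a new, larger block.
bool canExpandInPlace(const uint8_t* objectEnd, const uint8_t* allocationPoint,
                      size_t remaining, size_t expandSize)
{
    return objectEnd == allocationPoint && expandSize <= remaining;
}
```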
| 616 | 608 |
| 617 bool NormalPageHeap::shrinkObject(HeapObjectHeader* header, size_t newSize) | 609 bool NormalPageHeap::shrinkObject(HeapObjectHeader* header, size_t newSize) |
| 618 { | 610 { |
| 619 ASSERT(header->checkHeader()); | 611 ASSERT(header->checkHeader()); |
| 620 ASSERT(header->payloadSize() > newSize); | 612 ASSERT(header->payloadSize() > newSize); |
| 621 size_t allocationSize = Heap::allocationSizeFromSize(newSize); | 613 size_t allocationSize = Heap::allocationSizeFromSize(newSize); |
| 622 ASSERT(header->size() > allocationSize); | 614 ASSERT(header->size() > allocationSize); |
| 623 size_t shrinkSize = header->size() - allocationSize; | 615 size_t shrinkSize = header->size() - allocationSize; |
| 624 if (isObjectAllocatedAtAllocationPoint(header)) { | 616 if (isObjectAllocatedAtAllocationPoint(header)) { |
| 625 m_currentAllocationPoint -= shrinkSize; | 617 m_currentAllocationPoint -= shrinkSize; |
| 626 m_remainingAllocationSize += shrinkSize; | 618 setRemainingAllocationSize(m_remainingAllocationSize + shrinkSize); |
| 627 SET_MEMORY_INACCESSIBLE(m_currentAllocationPoint, shrinkSize); | 619 SET_MEMORY_INACCESSIBLE(m_currentAllocationPoint, shrinkSize); |
| 628 header->setSize(allocationSize); | 620 header->setSize(allocationSize); |
| 629 return true; | 621 return true; |
| 630 } | 622 } |
| 631 ASSERT(shrinkSize >= sizeof(HeapObjectHeader)); | 623 ASSERT(shrinkSize >= sizeof(HeapObjectHeader)); |
| 632 ASSERT(header->gcInfoIndex() > 0); | 624 ASSERT(header->gcInfoIndex() > 0); |
| 633 Address shrinkAddress = header->payloadEnd() - shrinkSize; | 625 Address shrinkAddress = header->payloadEnd() - shrinkSize; |
| 634 HeapObjectHeader* freedHeader = new (NotNull, shrinkAddress) HeapObjectHeader(shrinkSize, header->gcInfoIndex()); | 626 HeapObjectHeader* freedHeader = new (NotNull, shrinkAddress) HeapObjectHeader(shrinkSize, header->gcInfoIndex()); |
| 635 freedHeader->markPromptlyFreed(); | 627 freedHeader->markPromptlyFreed(); |
| 636 ASSERT(pageFromObject(reinterpret_cast<Address>(header)) == findPageFromAddress(reinterpret_cast<Address>(header))); | 628 ASSERT(pageFromObject(reinterpret_cast<Address>(header)) == findPageFromAddress(reinterpret_cast<Address>(header))); |
| (...skipping 23 matching lines...) |
| 660 // For NormalPage, stop lazy sweeping once we find a slot to | 652 // For NormalPage, stop lazy sweeping once we find a slot to |
| 661 // allocate a new object. | 653 // allocate a new object. |
| 662 result = allocateFromFreeList(allocationSize, gcInfoIndex); | 654 result = allocateFromFreeList(allocationSize, gcInfoIndex); |
| 663 if (result) | 655 if (result) |
| 664 break; | 656 break; |
| 665 } | 657 } |
| 666 } | 658 } |
| 667 return result; | 659 return result; |
| 668 } | 660 } |
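The tail of the allocation slow path above (its start is in the skipped region) sweeps pages lazily and stops as soon as a free-list allocation succeeds. A hedged sketch of that control flow, with stand-in page and free-list types rather than the real Blink ones:

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical sketch of lazy sweeping during allocation: sweep one page at a
// time and stop as soon as the free list can satisfy the request.
template <typename Page, typename FreeList>
uint8_t* lazySweepAndAllocate(Page* firstUnsweptPage, FreeList& freeList, size_t allocationSize)
{
    uint8_t* result = nullptr;
    for (Page* page = firstUnsweptPage; page; page = page->next()) {
        page->sweep();                               // returns dead objects' memory to the free list
        result = freeList.allocate(allocationSize);  // try to satisfy the request now
        if (result)
            break;                                   // stop sweeping once a slot is found
    }
    return result;
}
```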
| 669 | 661 |
| 662 void NormalPageHeap::setRemainingAllocationSize(size_t newRemainingAllocationSize) |
| 663 { |
| 664 m_remainingAllocationSize = newRemainingAllocationSize; |
| 665 |
| 666 // Sync recorded allocated-object size: |
| 667 // - if previous alloc checkpoint is larger, allocation size has increased. |
| 667 // - if smaller, a net reduction in size since last call to updateRemainingAllocationSize(). |
| 669 if (m_lastRemainingAllocationSize > m_remainingAllocationSize) |
| 670 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - m_remainingAllocationSize); |
| 671 else if (m_lastRemainingAllocationSize != m_remainingAllocationSize) |
| 672 Heap::decreaseAllocatedObjectSize(m_remainingAllocationSize - m_lastRemainingAllocationSize); |
| 673 m_lastRemainingAllocationSize = m_remainingAllocationSize; |
| 674 } |
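The new setRemainingAllocationSize() helper centralizes the bookkeeping that the old code inlined in promptlyFreeObject(): it compares the new remaining size against the last recorded checkpoint and reports the signed delta to the heap-wide allocated-object counter. A small free-standing illustration of that delta logic, using a hypothetical global counter in place of the Heap:: calls:

```cpp
#include <cstddef>

static size_t g_allocatedObjectSize = 0;   // hypothetical stand-in for the heap-wide counter

struct AllocationBookkeeping {
    size_t remaining = 0;        // bytes still available at the allocation point
    size_t lastRemaining = 0;    // value of `remaining` at the last checkpoint

    void setRemaining(size_t newRemaining)
    {
        remaining = newRemaining;
        // Less remaining than at the checkpoint means more bytes were handed out.
        if (lastRemaining > remaining)
            g_allocatedObjectSize += lastRemaining - remaining;
        else if (lastRemaining != remaining)
            g_allocatedObjectSize -= remaining - lastRemaining;
        lastRemaining = remaining;
    }
};
```

For example, after the prompt-free path calls the equivalent of `setRemaining(remaining + size)`, the counter shrinks by `size`; after in-place expansion consumes `expandSize` bytes it grows by `expandSize`.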
| 675 |
| 670 void NormalPageHeap::updateRemainingAllocationSize() | 676 void NormalPageHeap::updateRemainingAllocationSize() |
| 671 { | 677 { |
| 672 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { | 678 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { |
| 673 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); | 679 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); |
| 674 m_lastRemainingAllocationSize = remainingAllocationSize(); | 680 m_lastRemainingAllocationSize = remainingAllocationSize(); |
| 675 } | 681 } |
| 676 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); | 682 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); |
| 677 } | 683 } |
| 678 | 684 |
| 679 void NormalPageHeap::setAllocationPoint(Address point, size_t size) | 685 void NormalPageHeap::setAllocationPoint(Address point, size_t size) |
| (...skipping 898 matching lines...) |
| 1578 | 1584 |
| 1579 m_hasEntries = true; | 1585 m_hasEntries = true; |
| 1580 size_t index = hash(address); | 1586 size_t index = hash(address); |
| 1581 ASSERT(!(index & 1)); | 1587 ASSERT(!(index & 1)); |
| 1582 Address cachePage = roundToBlinkPageStart(address); | 1588 Address cachePage = roundToBlinkPageStart(address); |
| 1583 m_entries[index + 1] = m_entries[index]; | 1589 m_entries[index + 1] = m_entries[index]; |
| 1584 m_entries[index] = cachePage; | 1590 m_entries[index] = cachePage; |
| 1585 } | 1591 } |
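The cache insert above keeps two entries per bucket: the previous occupant of the even slot is demoted to the adjacent odd slot and the new page takes the even slot, so a lookup only has to probe two neighbouring entries. A hypothetical standalone sketch of that structure (hash details and sizes are illustrative, not the Blink values):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

// Hypothetical sketch of a 2-way, move-to-front page-address cache: each
// bucket occupies two adjacent slots, newest entry first.
class TwoWayPageCache {
public:
    TwoWayPageCache() { memset(m_entries, 0, sizeof(m_entries)); }

    void add(uintptr_t page)
    {
        size_t index = hash(page);
        m_entries[index + 1] = m_entries[index];   // demote previous entry to the second slot
        m_entries[index] = page;                   // newest entry goes first
    }

    bool contains(uintptr_t page) const
    {
        size_t index = hash(page);
        return m_entries[index] == page || m_entries[index + 1] == page;
    }

private:
    static const size_t kNumberOfEntries = 128;    // must be a power of two

    static size_t hash(uintptr_t page)
    {
        size_t h = page >> 12;                     // assume page-aligned addresses
        h ^= h >> 7;
        return (h & (kNumberOfEntries / 2 - 1)) * 2;  // always an even bucket index
    }

    uintptr_t m_entries[kNumberOfEntries];
};
```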
| 1586 | 1592 |
| 1587 } // namespace blink | 1593 } // namespace blink |