Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 822 matching lines...) | |
| 833 void removePageFromHeap(HeapPage<Header>*); | 833 void removePageFromHeap(HeapPage<Header>*); |
| 834 | 834 |
| 835 PLATFORM_EXPORT void promptlyFreeObject(Header*); | 835 PLATFORM_EXPORT void promptlyFreeObject(Header*); |
| 836 | 836 |
| 837 private: | 837 private: |
| 838 void addPageToHeap(const GCInfo*); | 838 void addPageToHeap(const GCInfo*); |
| 839 PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*); | 839 PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*); |
| 840 static size_t allocationSizeFromSize(size_t); | 840 static size_t allocationSizeFromSize(size_t); |
| 841 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); | 841 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); |
| 842 Address currentAllocationPoint() const { return m_currentAllocationPoint; } | 842 Address currentAllocationPoint() const { return m_currentAllocationPoint; } |
| 843 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } | 843 size_t remainingAllocationSize() const |
| 844 { | |
| 845 RELEASE_ASSERT(m_allocationLimit >= m_currentAllocationPoint); | |
| 846 return static_cast<size_t>(m_allocationLimit - m_currentAllocationPoint); | |
| 847 } | |
| 844 bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } | 848 bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } |
| 845 void setAllocationPoint(Address point, size_t size) | 849 void setAllocationPoint(Address point, size_t size) |
| 846 { | 850 { |
| 847 ASSERT(!point || heapPageFromAddress(point)); | 851 ASSERT(!point || heapPageFromAddress(point)); |
| 848 ASSERT(size <= HeapPage<Header>::payloadSize()); | 852 ASSERT(size <= HeapPage<Header>::payloadSize()); |
| 849 m_currentAllocationPoint = point; | 853 m_currentAllocationPoint = point; |
| 850 m_remainingAllocationSize = size; | 854 m_allocationLimit = point + size; |
| 855 if (m_lastRemainingAllocationSize != remainingAllocationSize()) | |
| 856 stats().increaseObjectSpace(m_lastRemainingAllocationSize - remainingAllocationSize()); | |
| 857 m_lastRemainingAllocationSize = remainingAllocationSize(); | |
| 851 } | 858 } |
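The patch replaces the cached byte counter `m_remainingAllocationSize` with an explicit `m_allocationLimit`, so the remaining size becomes a quantity derived from the two pointers that cannot drift out of sync with the bump pointer. Below is a minimal standalone sketch of that bookkeeping, using illustrative names rather than Blink's actual API:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

using Address = uint8_t*;

// Illustrative bump-allocation area: a current point plus a limit,
// mirroring the patched ThreadHeap fields.
class BumpArea {
public:
    void setAllocationPoint(Address point, size_t size)
    {
        m_currentAllocationPoint = point;
        m_allocationLimit = point + size;
    }

    // Derived rather than stored: limit minus current point can never
    // go stale relative to the bump pointer.
    size_t remainingAllocationSize() const
    {
        assert(m_allocationLimit >= m_currentAllocationPoint);
        return static_cast<size_t>(m_allocationLimit - m_currentAllocationPoint);
    }

private:
    Address m_currentAllocationPoint = nullptr;
    Address m_allocationLimit = nullptr;
};
```

The `m_lastRemainingAllocationSize` field in the patch exists only to keep `stats().increaseObjectSpace` incremental; the sketch omits it.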
| 852 void ensureCurrentAllocation(size_t, const GCInfo*); | 859 void ensureCurrentAllocation(size_t, const GCInfo*); |
| 853 bool allocateFromFreeList(size_t); | 860 bool allocateFromFreeList(size_t); |
| 854 | 861 |
| 855 void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**); | 862 void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**); |
| 856 void allocatePage(const GCInfo*); | 863 void allocatePage(const GCInfo*); |
| 857 | 864 |
| 858 #if ENABLE(ASSERT) | 865 #if ENABLE(ASSERT) |
| 859 bool pagesToBeSweptContains(Address); | 866 bool pagesToBeSweptContains(Address); |
| 860 bool pagesAllocatedDuringSweepingContains(Address); | 867 bool pagesAllocatedDuringSweepingContains(Address); |
| 861 #endif | 868 #endif |
| 862 | 869 |
| 863 void sweepNormalPages(HeapStats*); | 870 void sweepNormalPages(HeapStats*); |
| 864 void sweepLargePages(HeapStats*); | 871 void sweepLargePages(HeapStats*); |
| 865 bool coalesce(size_t); | 872 bool coalesce(size_t); |
| 866 | 873 |
| 867 Address m_currentAllocationPoint; | 874 Address m_currentAllocationPoint; |
| 868 size_t m_remainingAllocationSize; | 875 Address m_allocationLimit; |
| 876 size_t m_lastRemainingAllocationSize; | |
| 869 | 877 |
| 870 HeapPage<Header>* m_firstPage; | 878 HeapPage<Header>* m_firstPage; |
| 871 LargeHeapObject<Header>* m_firstLargeHeapObject; | 879 LargeHeapObject<Header>* m_firstLargeHeapObject; |
| 872 | 880 |
| 873 HeapPage<Header>* m_firstPageAllocatedDuringSweeping; | 881 HeapPage<Header>* m_firstPageAllocatedDuringSweeping; |
| 874 HeapPage<Header>* m_lastPageAllocatedDuringSweeping; | 882 HeapPage<Header>* m_lastPageAllocatedDuringSweeping; |
| 875 | 883 |
| 876 // Merge point for parallel sweep. | 884 // Merge point for parallel sweep. |
| 877 HeapPage<Header>* m_mergePoint; | 885 HeapPage<Header>* m_mergePoint; |
| 878 | 886 |
| (...skipping 513 matching lines...) | |
| 1392 } | 1400 } |
| 1393 | 1401 |
| 1394 size_t FinalizedHeapObjectHeader::payloadSize() | 1402 size_t FinalizedHeapObjectHeader::payloadSize() |
| 1395 { | 1403 { |
| 1396 return size() - finalizedHeaderSize; | 1404 return size() - finalizedHeaderSize; |
| 1397 } | 1405 } |
| 1398 | 1406 |
| 1399 template<typename Header> | 1407 template<typename Header> |
| 1400 size_t ThreadHeap<Header>::allocationSizeFromSize(size_t size) | 1408 size_t ThreadHeap<Header>::allocationSizeFromSize(size_t size) |
| 1401 { | 1409 { |
| 1410 size_t allocationSize = size + sizeof(Header); | |
|
Erik Corry
2014/10/13 13:20:32
You can't move this above the release assert, because the addition can overflow.
haraken
2014/10/14 10:43:07
Thanks for catching this, done.
| 1411 | |
| 1402 // Check the size before computing the actual allocation size. The | 1412 // Check the size before computing the actual allocation size. The |
| 1403 // allocation size calculation can overflow for large sizes and | 1413 // allocation size calculation can overflow for large sizes and |
| 1404 // the check therefore has to happen before any calculation on the | 1414 // the check therefore has to happen before any calculation on the |
| 1405 // size. | 1415 // size. |
| 1406 RELEASE_ASSERT(size < maxHeapObjectSize); | 1416 RELEASE_ASSERT(allocationSize < maxHeapObjectSize); |
| 1407 | 1417 |
| 1408 // Add space for header. | |
| 1409 size_t allocationSize = size + sizeof(Header); | |
| 1410 // Align size with allocation granularity. | 1418 // Align size with allocation granularity. |
| 1411 allocationSize = (allocationSize + allocationMask) & ~allocationMask; | 1419 allocationSize = (allocationSize + allocationMask) & ~allocationMask; |
| 1412 return allocationSize; | 1420 return allocationSize; |
| 1413 } | 1421 } |
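The thread above is about ordering: for a size near `SIZE_MAX`, the addition `size + sizeof(Header)` wraps around, so a limit check performed on the wrapped `allocationSize` passes when it should fail. A small demonstration of the hazard (the header size and limit are made-up constants for the example):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main()
{
    const size_t headerSize = 8;               // stand-in for sizeof(Header)
    const size_t maxHeapObjectSize = 1u << 27; // illustrative limit

    size_t size = SIZE_MAX - 4;                // hostile allocation request
    size_t allocationSize = size + headerSize; // wraps around to 3

    // Checking after the addition lets the hostile size slip through:
    printf("post-add check passes: %d\n", allocationSize < maxHeapObjectSize); // prints 1
    // Checking the raw size first catches it:
    printf("pre-add check passes: %d\n", size < maxHeapObjectSize);            // prints 0
    return 0;
}
```

This is why the original code asserts on `size` before doing any arithmetic on it, exactly as the comment inside the function says.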
| 1414 | 1422 |
| 1415 template<typename Header> | 1423 template<typename Header> |
| 1416 Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo) | 1424 Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo) |
| 1417 { | 1425 { |
| 1418 size_t allocationSize = allocationSizeFromSize(size); | 1426 size_t allocationSize = allocationSizeFromSize(size); |
| 1419 bool isLargeObject = allocationSize > blinkPageSize / 2; | 1427 Address nextAllocationPoint = m_currentAllocationPoint + allocationSize; |
| 1420 if (isLargeObject) | 1428 if (LIKELY(nextAllocationPoint <= m_allocationLimit)) { |
| 1421 return allocateLargeObject(allocationSize, gcInfo); | 1429 Address headerAddress = m_currentAllocationPoint; |
| 1422 if (m_remainingAllocationSize < allocationSize) | 1430 m_currentAllocationPoint = nextAllocationPoint; |
| 1423 return outOfLineAllocate(size, gcInfo); | 1431 Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo); |
| 1424 Address headerAddress = m_currentAllocationPoint; | 1432 Address result = headerAddress + sizeof(*header); |
| 1425 m_currentAllocationPoint += allocationSize; | 1433 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
| 1426 m_remainingAllocationSize -= allocationSize; | 1434 |
| 1427 Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo); | 1435 // Unpoison the memory used for the object (payload). |
| 1428 size_t payloadSize = allocationSize - sizeof(Header); | 1436 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(Header)); |
| 1429 stats().increaseObjectSpace(payloadSize); | |
| 1430 Address result = headerAddress + sizeof(*header); | |
| 1431 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | |
| 1432 // Unpoison the memory used for the object (payload). | |
| 1433 ASAN_UNPOISON_MEMORY_REGION(result, payloadSize); | |
| 1434 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 1437 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
| 1435 memset(result, 0, payloadSize); | 1438 memset(result, 0, allocationSize - sizeof(Header)); |
| 1436 #endif | 1439 #endif |
| 1437 ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1)); | 1440 ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1)); |
| 1438 return result; | 1441 return result; |
| 1442 } | |
| 1443 ASSERT(allocationSize > remainingAllocationSize()); | |
| 1444 return outOfLineAllocate(size, gcInfo); | |
| 1439 } | 1445 } |
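The rewritten fast path computes `nextAllocationPoint` once and compares it against the limit; the large-object branch disappears from `allocate` and, together with refilling the area, is presumably handled inside `outOfLineAllocate`. A simplified sketch of the same shape, eliding the object header, the `LIKELY` hint, and the sanitizer hooks (assumed names, not Blink's):

```cpp
#include <cstddef>
#include <cstdint>

using Address = uint8_t*;

class SimpleBumpHeap {
public:
    void setAllocationPoint(Address point, size_t size)
    {
        m_currentAllocationPoint = point;
        m_allocationLimit = point + size;
    }

    Address allocate(size_t allocationSize)
    {
        Address next = m_currentAllocationPoint + allocationSize;
        if (next <= m_allocationLimit) { // fast path: bump and return
            Address result = m_currentAllocationPoint;
            m_currentAllocationPoint = next;
            return result;
        }
        // Slow path: the real heap refills the area or allocates a
        // large object; this sketch just reports exhaustion.
        return nullptr;
    }

private:
    Address m_currentAllocationPoint = nullptr;
    Address m_allocationLimit = nullptr;
};

int main()
{
    // Usage: hand the heap a buffer, then bump-allocate from it.
    static uint8_t buffer[4096];
    SimpleBumpHeap heap;
    heap.setAllocationPoint(buffer, sizeof(buffer));
    Address p = heap.allocate(64);   // fast path: p == buffer
    Address q = heap.allocate(8192); // too big: falls to the slow path
    return (p == buffer && q == nullptr) ? 0 : 1;
}
```

Note the comparison is `<=`, matching the patch: an allocation that exactly exhausts the area still succeeds on the fast path.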
| 1440 | 1446 |
| 1441 template<typename T, typename HeapTraits> | 1447 template<typename T, typename HeapTraits> |
| 1442 Address Heap::allocate(size_t size) | 1448 Address Heap::allocate(size_t size) |
| 1443 { | 1449 { |
| 1444 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 1450 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
| 1445 ASSERT(state->isAllocationAllowed()); | 1451 ASSERT(state->isAllocationAllowed()); |
| 1446 const GCInfo* gcInfo = GCInfoTrait<T>::get(); | 1452 const GCInfo* gcInfo = GCInfoTrait<T>::get(); |
| 1447 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer()); | 1453 BaseHeap* heap = state->heap(gcInfo->heapIndex()); |
|
Erik Corry
2014/10/13 13:20:32
Why is this faster?
The HeapTraits should be known at compile time.
haraken
2014/10/14 10:43:07
You're right. I confirmed that the assembly is the same.
| |
| 1448 BaseHeap* heap = state->heap(heapIndex); | |
| 1449 return static_cast<typename HeapTraits::HeapType*>(heap)->allocate(size, gcI nfo); | 1454 return static_cast<typename HeapTraits::HeapType*>(heap)->allocate(size, gcI nfo); |
| 1450 } | 1455 } |
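On the thread above: `HeapTraits::index` is resolved at compile time, so routing the index through `gcInfo->heapIndex()` buys nothing, which haraken's inspection of the generated assembly confirmed. A hypothetical trait showing why such an index folds to a constant (names assumed, not Blink's):

```cpp
// Hypothetical trait in the spirit of HeapTypeTrait: the heap index is
// a compile-time constant, so state->heap(HeapTraits::index(...)) can
// compile down to a fixed-offset load.
template<int baseIndex>
struct StaticHeapTrait {
    static constexpr int index(bool hasFinalizer)
    {
        return baseIndex + (hasFinalizer ? 1 : 0);
    }
};

// Evaluated entirely at compile time:
static_assert(StaticHeapTrait<0>::index(true) == 1, "index folds to a constant");
```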
| 1451 | 1456 |
| 1452 template<typename T> | 1457 template<typename T> |
| 1453 Address Heap::allocate(size_t size) | 1458 Address Heap::allocate(size_t size) |
| 1454 { | 1459 { |
| 1455 return allocate<T, HeapTypeTrait<T> >(size); | 1460 return allocate<T, HeapTypeTrait<T> >(size); |
| 1456 } | 1461 } |
| 1457 | 1462 |
| 1458 template<typename T> | 1463 template<typename T> |
| (...skipping 991 matching lines...) | |
| 2450 }; | 2455 }; |
| 2451 | 2456 |
| 2452 template<typename T> | 2457 template<typename T> |
| 2453 struct IfWeakMember<WeakMember<T> > { | 2458 struct IfWeakMember<WeakMember<T> > { |
| 2454 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); } | 2459 static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); } |
| 2455 }; | 2460 }; |
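`IfWeakMember` is a classic trait dispatch: the primary template, whose body is elided above, presumably reports non-weak members as alive, and only the `WeakMember<T>` specialization consults the visitor. A stripped-down illustration of the pattern (the types here are stand-ins, not Blink's):

```cpp
// Primary template: ordinary members are never reported dead.
template<typename T>
struct IfWeakMemberSketch {
    static bool isDead(const T&) { return false; }
};

// Hypothetical weak handle: a pointer plus a liveness flag standing in
// for visitor->isAlive(ptr).
template<typename T>
struct WeakHandle {
    T* ptr;
    bool pointeeAlive;
};

// Specialization: only weak handles can report a dead pointee.
template<typename T>
struct IfWeakMemberSketch<WeakHandle<T> > {
    static bool isDead(const WeakHandle<T>& h) { return !h.pointeeAlive; }
};
```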
| 2456 | 2461 |
| 2457 } | 2462 } |
| 2458 | 2463 |
| 2459 #endif // Heap_h | 2464 #endif // Heap_h |