Chromium Code Reviews

Unified diff: Source/platform/heap/Heap.h

Issue 638223003: [oilpan]: Attempt to make allocation faster by only updating the GC stats when needed, rather than …
Base URL: https://chromium.googlesource.com/chromium/blink.git@master
Patch Set: Created 6 years, 2 months ago
 /*
  * Copyright (C) 2013 Google Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
  * met:
  *
  *     * Redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above
(...skipping 893 matching lines...)
     static size_t allocationSizeFromSize(size_t);
     PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*);
     Address currentAllocationPoint() const { return m_currentAllocationPoint; }
     size_t remainingAllocationSize() const { return m_remainingAllocationSize; }
     bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); }
     void setAllocationPoint(Address point, size_t size)
     {
         ASSERT(!point || heapPageFromAddress(point));
         ASSERT(size <= HeapPage<Header>::payloadSize());
         m_currentAllocationPoint = point;
-        m_remainingAllocationSize = size;
+        if (m_lastRemainingAllocationSize != m_remainingAllocationSize)
+            stats().increaseObjectSpace(m_lastRemainingAllocationSize - m_remainingAllocationSize);
+        m_lastRemainingAllocationSize = m_remainingAllocationSize = size;
     }
     void ensureCurrentAllocation(size_t, const GCInfo*);
     bool allocateFromFreeList(size_t);

     void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**);
     void allocatePage(const GCInfo*);

 #if ENABLE(ASSERT)
     bool pagesToBeSweptContains(Address);
     bool pagesAllocatedDuringSweepingContains(Address);
 #endif

     void sweepNormalPages(HeapStats*);
     void sweepLargePages(HeapStats*);
     bool coalesce(size_t);

     Address m_currentAllocationPoint;
     size_t m_remainingAllocationSize;
+    size_t m_lastRemainingAllocationSize;

     HeapPage<Header>* m_firstPage;
     LargeHeapObject<Header>* m_firstLargeHeapObject;

     HeapPage<Header>* m_firstPageAllocatedDuringSweeping;
     HeapPage<Header>* m_lastPageAllocatedDuringSweeping;

     // Merge point for parallel sweep.
     HeapPage<Header>* m_mergePoint;

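This hunk is where the deferral happens: setAllocationPoint() now records, in the new m_lastRemainingAllocationSize member, how much of the allocation area was left at the last sync point, and charges stats().increaseObjectSpace() with the bytes consumed since then only when the allocation area is replaced, rather than on every allocation. Below is a minimal, self-contained sketch of the same deferred-accounting pattern; Stats and BumpRegion are illustrative stand-ins for this review, not Blink's HeapStats/ThreadHeap.

#include <cstddef>
#include <cstdint>

// Illustrative stand-in for Blink's HeapStats.
struct Stats {
    size_t objectSpace = 0;
    void increaseObjectSpace(size_t delta) { objectSpace += delta; }
};

// Illustrative stand-in for a bump-allocation area with deferred stats.
class BumpRegion {
public:
    explicit BumpRegion(Stats* stats) : m_stats(stats) { }

    // Fast path: pointer arithmetic only, no stats update.
    uint8_t* allocate(size_t size)
    {
        if (size > m_remaining)
            return nullptr; // the real code falls back to a slow path here
        uint8_t* result = m_current;
        m_current += size;
        m_remaining -= size;
        return result;
    }

    // Slow path: charge everything consumed since the last sync, then
    // install the new allocation area (mirrors setAllocationPoint()).
    void setAllocationPoint(uint8_t* point, size_t size)
    {
        if (m_lastRemaining != m_remaining)
            m_stats->increaseObjectSpace(m_lastRemaining - m_remaining);
        m_current = point;
        m_lastRemaining = m_remaining = size;
    }

private:
    Stats* m_stats;
    uint8_t* m_current = nullptr;
    size_t m_remaining = 0;
    size_t m_lastRemaining = 0;
};

The trade-off is that the object-space counter lags behind until the next setAllocationPoint() call, which appears to be what the issue title means by updating the GC stats only when needed.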
(...skipping 504 matching lines...)
     size_t allocationSize = size + sizeof(Header);
     // Align size with allocation granularity.
     allocationSize = (allocationSize + allocationMask) & ~allocationMask;
     return allocationSize;
 }

 template<typename Header>
 Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo)
 {
     size_t allocationSize = allocationSizeFromSize(size);
-    bool isLargeObject = allocationSize > blinkPageSize / 2;
-    if (isLargeObject)
-        return allocateLargeObject(allocationSize, gcInfo);
-    if (m_remainingAllocationSize < allocationSize)
-        return outOfLineAllocate(size, gcInfo);
-    Address headerAddress = m_currentAllocationPoint;
-    m_currentAllocationPoint += allocationSize;
-    m_remainingAllocationSize -= allocationSize;
-    Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo);
-    size_t payloadSize = allocationSize - sizeof(Header);
-    stats().increaseObjectSpace(payloadSize);
-    Address result = headerAddress + sizeof(*header);
-    ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
-    // Unpoison the memory used for the object (payload).
-    ASAN_UNPOISON_MEMORY_REGION(result, payloadSize);
+    if (allocationSize <= m_remainingAllocationSize) {
+        Address headerAddress = m_currentAllocationPoint;
+        m_currentAllocationPoint += allocationSize;
+        m_remainingAllocationSize -= allocationSize;
+        Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo);
+        Address result = headerAddress + sizeof(*header);
+        ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
+
+        // Unpoison the memory used for the object (payload).
+        ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(Header));
 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
-    memset(result, 0, payloadSize);
+        memset(result, 0, allocationSize - sizeof(Header));
 #endif
-    ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1));
-    return result;
+        ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1));
+        return result;
+    }
+    ASSERT(allocationSize > m_remainingAllocationSize);
+    return outOfLineAllocate(size, gcInfo);
 }
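With the hunk above, the inlined fast path of allocate() is reduced to a size check, the pointer bump, the placement new of the header, and sanitizer bookkeeping; the per-allocation stats().increaseObjectSpace(payloadSize) call is gone, and the large-object check no longer sits on the inline path. A short usage sketch, assuming the illustrative Stats/BumpRegion classes from the previous example are in the same file, shows the counter staying stale across fast-path allocations and catching up at the next setAllocationPoint():

#include <cstdio>

int main()
{
    Stats stats;
    BumpRegion region(&stats);

    static uint8_t buffer[4096];
    region.setAllocationPoint(buffer, sizeof(buffer));

    // Three fast-path allocations: no stats traffic yet.
    region.allocate(64);
    region.allocate(128);
    region.allocate(32);
    std::printf("object space before sync: %zu\n", stats.objectSpace); // prints 0

    // Replacing the allocation area charges the 224 bytes consumed so far.
    static uint8_t nextBuffer[4096];
    region.setAllocationPoint(nextBuffer, sizeof(nextBuffer));
    std::printf("object space after sync: %zu\n", stats.objectSpace); // prints 224
}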

 template<typename T, typename HeapTraits>
 Address Heap::allocate(size_t size)
 {
     ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
     ASSERT(state->isAllocationAllowed());
     const GCInfo* gcInfo = GCInfoTrait<T>::get();
     int heapIndex = HeapTraits::index(gcInfo->hasFinalizer());
     BaseHeap* heap = state->heap(heapIndex);
(...skipping 1001 matching lines...)
 };

 template<typename T>
 struct IfWeakMember<WeakMember<T> > {
     static bool isDead(Visitor* visitor, const WeakMember<T>& t) { return !visitor->isAlive(t.get()); }
 };

 }

 #endif // Heap_h