| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 214 matching lines...) |
| 225 return page; | 225 return page; |
| 226 } | 226 } |
| 227 return nullptr; | 227 return nullptr; |
| 228 } | 228 } |
| 229 #endif | 229 #endif |
| 230 | 230 |
| 231 #if ENABLE(GC_PROFILING) | 231 #if ENABLE(GC_PROFILING) |
| 232 #define GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD 0 | 232 #define GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD 0 |
| 233 void BaseHeap::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) | 233 void BaseHeap::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) |
| 234 { | 234 { |
| 235 ASSERT(isConsistentForSweeping()); | 235 ASSERT(isConsistentForGC()); |
| 236 size_t previousPageCount = info->pageCount; | 236 size_t previousPageCount = info->pageCount; |
| 237 | 237 |
| 238 json->beginArray("pages"); | 238 json->beginArray("pages"); |
| 239 for (BasePage* page = m_firstPage; page; page = page->next(), ++info->pageCount) { | 239 for (BasePage* page = m_firstPage; page; page = page->next(), ++info->pageCount) { |
| 240 // FIXME: To limit the size of the snapshot we only output "threshold" many page snapshots. | 240 // FIXME: To limit the size of the snapshot we only output "threshold" many page snapshots. |
| 241 if (info->pageCount < GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD) { | 241 if (info->pageCount < GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD) { |
| 242 json->beginArray(); | 242 json->beginArray(); |
| 243 json->pushInteger(reinterpret_cast<intptr_t>(page)); | 243 json->pushInteger(reinterpret_cast<intptr_t>(page)); |
| 244 page->snapshot(json, info); | 244 page->snapshot(json, info); |
| 245 json->endArray(); | 245 json->endArray(); |
| (...skipping 18 matching lines...) |
| 264 page->countObjectsToSweep(classAgeCounts); | 264 page->countObjectsToSweep(classAgeCounts); |
| 265 } | 265 } |
| 266 | 266 |
| 267 void BaseHeap::incrementMarkedObjectsAge() | 267 void BaseHeap::incrementMarkedObjectsAge() |
| 268 { | 268 { |
| 269 for (BasePage* page = m_firstPage; page; page = page->next()) | 269 for (BasePage* page = m_firstPage; page; page = page->next()) |
| 270 page->incrementMarkedObjectsAge(); | 270 page->incrementMarkedObjectsAge(); |
| 271 } | 271 } |
| 272 #endif | 272 #endif |
| 273 | 273 |
| 274 void BaseHeap::makeConsistentForSweeping() | 274 void BaseHeap::makeConsistentForGC() |
| 275 { | 275 { |
| 276 clearFreeLists(); | 276 clearFreeLists(); |
| 277 ASSERT(isConsistentForSweeping()); | 277 ASSERT(isConsistentForGC()); |
| 278 for (BasePage* page = m_firstPage; page; page = page->next()) | 278 for (BasePage* page = m_firstPage; page; page = page->next()) |
| 279 page->markAsUnswept(); | 279 page->markAsUnswept(); |
| 280 | 280 |
| 281 // If a new GC is requested before this thread has gotten around to | 281 // If a new GC is requested before this thread has gotten around to |
| 282 // sweeping, e.g. due to the thread doing a long-running operation, we | 282 // sweeping, e.g. due to the thread doing a long-running operation, we |
| 283 // clear the mark bits and mark any dead objects as dead. The latter | 283 // clear the mark bits and mark any dead objects as dead. The latter |
| 284 // is used to ensure the next GC marking does not trace already dead | 284 // is used to ensure the next GC marking does not trace already dead |
| 285 // objects. If we trace a dead object we could end up tracing into | 285 // objects. If we trace a dead object we could end up tracing into |
| 286 // garbage or the middle of another object via the newly conservatively | 286 // garbage or the middle of another object via the newly conservatively |
| 287 // found object. | 287 // found object. |
| 288 BasePage* previousPage = nullptr; | 288 BasePage* previousPage = nullptr; |
| 289 for (BasePage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) { | 289 for (BasePage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) { |
| 290 page->markUnmarkedObjectsDead(); | 290 page->makeConsistentForGC(); |
| 291 ASSERT(!page->hasBeenSwept()); | 291 ASSERT(!page->hasBeenSwept()); |
| 292 } | 292 } |
| 293 if (previousPage) { | 293 if (previousPage) { |
| 294 ASSERT(m_firstUnsweptPage); | 294 ASSERT(m_firstUnsweptPage); |
| 295 previousPage->m_next = m_firstPage; | 295 previousPage->m_next = m_firstPage; |
| 296 m_firstPage = m_firstUnsweptPage; | 296 m_firstPage = m_firstUnsweptPage; |
| 297 m_firstUnsweptPage = nullptr; | 297 m_firstUnsweptPage = nullptr; |
| 298 } | 298 } |
| 299 ASSERT(!m_firstUnsweptPage); | 299 ASSERT(!m_firstUnsweptPage); |
| 300 } | 300 } |
| 301 | 301 |
| 302 size_t BaseHeap::objectPayloadSizeForTesting() | 302 size_t BaseHeap::objectPayloadSizeForTesting() |
| 303 { | 303 { |
| 304 ASSERT(isConsistentForSweeping()); | 304 ASSERT(isConsistentForGC()); |
| 305 ASSERT(!m_firstUnsweptPage); | 305 ASSERT(!m_firstUnsweptPage); |
| 306 | 306 |
| 307 size_t objectPayloadSize = 0; | 307 size_t objectPayloadSize = 0; |
| 308 for (BasePage* page = m_firstPage; page; page = page->next()) | 308 for (BasePage* page = m_firstPage; page; page = page->next()) |
| 309 objectPayloadSize += page->objectPayloadSizeForTesting(); | 309 objectPayloadSize += page->objectPayloadSizeForTesting(); |
| 310 return objectPayloadSize; | 310 return objectPayloadSize; |
| 311 } | 311 } |
| 312 | 312 |
| 313 void BaseHeap::prepareHeapForTermination() | 313 void BaseHeap::prepareHeapForTermination() |
| 314 { | 314 { |
| (...skipping 119 matching lines...) |
| 434 clearFreeLists(); | 434 clearFreeLists(); |
| 435 } | 435 } |
| 436 | 436 |
| 437 void NormalPageHeap::clearFreeLists() | 437 void NormalPageHeap::clearFreeLists() |
| 438 { | 438 { |
| 439 setAllocationPoint(nullptr, 0); | 439 setAllocationPoint(nullptr, 0); |
| 440 m_freeList.clear(); | 440 m_freeList.clear(); |
| 441 } | 441 } |
| 442 | 442 |
| 443 #if ENABLE(ASSERT) | 443 #if ENABLE(ASSERT) |
| 444 bool NormalPageHeap::isConsistentForSweeping() | 444 bool NormalPageHeap::isConsistentForGC() |
| 445 { | 445 { |
| 446 // A thread heap is consistent for sweeping if none of the pages to be swept | 446 // A thread heap is consistent for GC if none of the pages to be swept |
| 447 // contain a freelist block or the current allocation point. | 447 // contain a freelist block or the current allocation point. |
| 448 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { | 448 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { |
| 449 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; freeListEntry; freeListEntry = freeListEntry->next()) { | 449 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; freeListEntry; freeListEntry = freeListEntry->next()) { |
| 450 if (pagesToBeSweptContains(freeListEntry->address())) | 450 if (pagesToBeSweptContains(freeListEntry->address())) |
| 451 return false; | 451 return false; |
| 452 } | 452 } |
| 453 } | 453 } |
| 454 if (hasCurrentAllocationArea()) { | 454 if (hasCurrentAllocationArea()) { |
| (...skipping 692 matching lines...) |
| 1147 markedObjectSize += header->size(); | 1147 markedObjectSize += header->size(); |
| 1148 startOfGap = headerAddress; | 1148 startOfGap = headerAddress; |
| 1149 } | 1149 } |
| 1150 if (startOfGap != payloadEnd()) | 1150 if (startOfGap != payloadEnd()) |
| 1151 heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap); | 1151 heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap); |
| 1152 | 1152 |
| 1153 if (markedObjectSize) | 1153 if (markedObjectSize) |
| 1154 Heap::increaseMarkedObjectSize(markedObjectSize); | 1154 Heap::increaseMarkedObjectSize(markedObjectSize); |
| 1155 } | 1155 } |
| 1156 | 1156 |
| 1157 void NormalPage::markUnmarkedObjectsDead() | 1157 void NormalPage::makeConsistentForGC() |
| 1158 { | 1158 { |
| 1159 size_t markedObjectSize = 0; | 1159 size_t markedObjectSize = 0; |
| 1160 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | 1160 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
| 1161 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1161 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 1162 ASSERT(header->size() < blinkPagePayloadSize()); | 1162 ASSERT(header->size() < blinkPagePayloadSize()); |
| 1163 // Check for a free list entry first since we cannot call | 1163 // Check for a free list entry first since we cannot call |
| 1164 // isMarked on a free list entry. | 1164 // isMarked on a free list entry. |
| 1165 if (header->isFree()) { | 1165 if (header->isFree()) { |
| 1166 headerAddress += header->size(); | 1166 headerAddress += header->size(); |
| 1167 continue; | 1167 continue; |
| (...skipping 282 matching lines...) |
| 1450 { | 1450 { |
| 1451 static_cast<LargeObjectHeap*>(heap())->freeLargeObjectPage(this); | 1451 static_cast<LargeObjectHeap*>(heap())->freeLargeObjectPage(this); |
| 1452 } | 1452 } |
| 1453 | 1453 |
| 1454 void LargeObjectPage::sweep() | 1454 void LargeObjectPage::sweep() |
| 1455 { | 1455 { |
| 1456 heapObjectHeader()->unmark(); | 1456 heapObjectHeader()->unmark(); |
| 1457 Heap::increaseMarkedObjectSize(size()); | 1457 Heap::increaseMarkedObjectSize(size()); |
| 1458 } | 1458 } |
| 1459 | 1459 |
| 1460 void LargeObjectPage::markUnmarkedObjectsDead() | 1460 void LargeObjectPage::makeConsistentForGC() |
| 1461 { | 1461 { |
| 1462 HeapObjectHeader* header = heapObjectHeader(); | 1462 HeapObjectHeader* header = heapObjectHeader(); |
| 1463 if (header->isMarked()) { | 1463 if (header->isMarked()) { |
| 1464 header->unmark(); | 1464 header->unmark(); |
| 1465 Heap::increaseMarkedObjectSize(size()); | 1465 Heap::increaseMarkedObjectSize(size()); |
| 1466 } else { | 1466 } else { |
| 1467 header->markDead(); | 1467 header->markDead(); |
| 1468 } | 1468 } |
| 1469 } | 1469 } |
| 1470 | 1470 |
| (...skipping 622 matching lines...) |
| 2093 Platform::current()->histogramEnumeration("BlinkGC.CommittedSize", sizeInMB, supportedMaxSizeInMB); | 2093 Platform::current()->histogramEnumeration("BlinkGC.CommittedSize", sizeInMB, supportedMaxSizeInMB); |
| 2094 observedMaxSizeInMB = sizeInMB; | 2094 observedMaxSizeInMB = sizeInMB; |
| 2095 } | 2095 } |
| 2096 } | 2096 } |
| 2097 | 2097 |
| 2098 size_t Heap::objectPayloadSizeForTesting() | 2098 size_t Heap::objectPayloadSizeForTesting() |
| 2099 { | 2099 { |
| 2100 size_t objectPayloadSize = 0; | 2100 size_t objectPayloadSize = 0; |
| 2101 for (ThreadState* state : ThreadState::attachedThreads()) { | 2101 for (ThreadState* state : ThreadState::attachedThreads()) { |
| 2102 state->setGCState(ThreadState::GCRunning); | 2102 state->setGCState(ThreadState::GCRunning); |
| 2103 state->makeConsistentForSweeping(); | 2103 state->makeConsistentForGC(); |
| 2104 objectPayloadSize += state->objectPayloadSizeForTesting(); | 2104 objectPayloadSize += state->objectPayloadSizeForTesting(); |
| 2105 state->setGCState(ThreadState::EagerSweepScheduled); | 2105 state->setGCState(ThreadState::EagerSweepScheduled); |
| 2106 state->setGCState(ThreadState::Sweeping); | 2106 state->setGCState(ThreadState::Sweeping); |
| 2107 state->setGCState(ThreadState::NoGCScheduled); | 2107 state->setGCState(ThreadState::NoGCScheduled); |
| 2108 } | 2108 } |
| 2109 return objectPayloadSize; | 2109 return objectPayloadSize; |
| 2110 } | 2110 } |
| 2111 | 2111 |
| 2112 BasePage* Heap::lookup(Address address) | 2112 BasePage* Heap::lookup(Address address) |
| 2113 { | 2113 { |
| (...skipping 109 matching lines...) |
| 2223 size_t Heap::s_allocatedObjectSize = 0; | 2223 size_t Heap::s_allocatedObjectSize = 0; |
| 2224 size_t Heap::s_allocatedSpace = 0; | 2224 size_t Heap::s_allocatedSpace = 0; |
| 2225 size_t Heap::s_markedObjectSize = 0; | 2225 size_t Heap::s_markedObjectSize = 0; |
| 2226 // We don't want to use 0 KB for the initial value because it may end up | 2226 // We don't want to use 0 KB for the initial value because it may end up |
| 2227 // triggering the first GC of some thread prematurely. | 2227 // triggering the first GC of some thread prematurely. |
| 2228 size_t Heap::s_estimatedLiveObjectSize = 512 * 1024; | 2228 size_t Heap::s_estimatedLiveObjectSize = 512 * 1024; |
| 2229 size_t Heap::s_externalObjectSizeAtLastGC = 0; | 2229 size_t Heap::s_externalObjectSizeAtLastGC = 0; |
| 2230 double Heap::s_estimatedMarkingTimePerByte = 0.0; | 2230 double Heap::s_estimatedMarkingTimePerByte = 0.0; |
| 2231 | 2231 |
| 2232 } // namespace blink | 2232 } // namespace blink |