| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 110 matching lines...) |
| 121 } | 121 } |
| 122 | 122 |
| 123 void BaseArena::cleanupPages() | 123 void BaseArena::cleanupPages() |
| 124 { | 124 { |
| 125 clearFreeLists(); | 125 clearFreeLists(); |
| 126 | 126 |
| 127 ASSERT(!m_firstUnsweptPage); | 127 ASSERT(!m_firstUnsweptPage); |
| 128 // Add the BaseArena's pages to the orphanedPagePool. | 128 // Add the BaseArena's pages to the orphanedPagePool. |
| 129 for (BasePage* page = m_firstPage; page; page = page->next()) { | 129 for (BasePage* page = m_firstPage; page; page = page->next()) { |
| 130 Heap::decreaseAllocatedSpace(page->size()); | 130 Heap::decreaseAllocatedSpace(page->size()); |
| 131 Heap::orphanedPagePool()->addOrphanedPage(arenaIndex(), page); | 131 Heap::getOrphanedPagePool()->addOrphanedPage(arenaIndex(), page); |
| 132 } | 132 } |
| 133 m_firstPage = nullptr; | 133 m_firstPage = nullptr; |
| 134 } | 134 } |
| 135 | 135 |
| 136 void BaseArena::takeSnapshot(const String& dumpBaseName, ThreadState::GCSnapshotInfo& info) | 136 void BaseArena::takeSnapshot(const String& dumpBaseName, ThreadState::GCSnapshotInfo& info) |
| 137 { | 137 { |
| 138 // |dumpBaseName| at this point is "blink_gc/thread_X/heaps/HeapName" | 138 // |dumpBaseName| at this point is "blink_gc/thread_X/heaps/HeapName" |
| 139 WebMemoryAllocatorDump* allocatorDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(dumpBaseName); | 139 WebMemoryAllocatorDump* allocatorDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(dumpBaseName); |
| 140 size_t pageCount = 0; | 140 size_t pageCount = 0; |
| 141 BasePage::HeapSnapshotInfo heapInfo; | 141 BasePage::HeapSnapshotInfo heapInfo; |
| (...skipping 96 matching lines...) |
| 238 void BaseArena::prepareHeapForTermination() | 238 void BaseArena::prepareHeapForTermination() |
| 239 { | 239 { |
| 240 ASSERT(!m_firstUnsweptPage); | 240 ASSERT(!m_firstUnsweptPage); |
| 241 for (BasePage* page = m_firstPage; page; page = page->next()) { | 241 for (BasePage* page = m_firstPage; page; page = page->next()) { |
| 242 page->setTerminating(); | 242 page->setTerminating(); |
| 243 } | 243 } |
| 244 } | 244 } |
| 245 | 245 |
| 246 void BaseArena::prepareForSweep() | 246 void BaseArena::prepareForSweep() |
| 247 { | 247 { |
| 248 ASSERT(threadState()->isInGC()); | 248 ASSERT(getThreadState()->isInGC()); |
| 249 ASSERT(!m_firstUnsweptPage); | 249 ASSERT(!m_firstUnsweptPage); |
| 250 | 250 |
| 251 // Move all pages to a list of unswept pages. | 251 // Move all pages to a list of unswept pages. |
| 252 m_firstUnsweptPage = m_firstPage; | 252 m_firstUnsweptPage = m_firstPage; |
| 253 m_firstPage = nullptr; | 253 m_firstPage = nullptr; |
| 254 } | 254 } |
| 255 | 255 |
| 256 #if defined(ADDRESS_SANITIZER) | 256 #if defined(ADDRESS_SANITIZER) |
| 257 void BaseArena::poisonArena(BlinkGC::ObjectsToPoison objectsToPoison, BlinkGC::Poisoning poisoning) | 257 void BaseArena::poisonArena(BlinkGC::ObjectsToPoison objectsToPoison, BlinkGC::Poisoning poisoning) |
| 258 { | 258 { |
| (...skipping 19 matching lines...) |
| 278 page->poisonObjects(objectsToPoison, poisoning); | 278 page->poisonObjects(objectsToPoison, poisoning); |
| 279 } | 279 } |
| 280 #endif | 280 #endif |
| 281 | 281 |
| 282 Address BaseArena::lazySweep(size_t allocationSize, size_t gcInfoIndex) | 282 Address BaseArena::lazySweep(size_t allocationSize, size_t gcInfoIndex) |
| 283 { | 283 { |
| 284 // If there are no pages to be swept, return immediately. | 284 // If there are no pages to be swept, return immediately. |
| 285 if (!m_firstUnsweptPage) | 285 if (!m_firstUnsweptPage) |
| 286 return nullptr; | 286 return nullptr; |
| 287 | 287 |
| 288 RELEASE_ASSERT(threadState()->isSweepingInProgress()); | 288 RELEASE_ASSERT(getThreadState()->isSweepingInProgress()); |
| 289 | 289 |
| 290 // lazySweepPages() can be called recursively if finalizers invoked in | 290 // lazySweepPages() can be called recursively if finalizers invoked in |
| 291 // page->sweep() allocate memory and the allocation triggers | 291 // page->sweep() allocate memory and the allocation triggers |
| 292 // lazySweepPages(). This check prevents the sweeping from being executed | 292 // lazySweepPages(). This check prevents the sweeping from being executed |
| 293 // recursively. | 293 // recursively. |
| 294 if (threadState()->sweepForbidden()) | 294 if (getThreadState()->sweepForbidden()) |
| 295 return nullptr; | 295 return nullptr; |
| 296 | 296 |
| 297 TRACE_EVENT0("blink_gc", "BaseArena::lazySweepPages"); | 297 TRACE_EVENT0("blink_gc", "BaseArena::lazySweepPages"); |
| 298 ThreadState::SweepForbiddenScope sweepForbidden(threadState()); | 298 ThreadState::SweepForbiddenScope sweepForbidden(getThreadState()); |
| 299 ScriptForbiddenIfMainThreadScope scriptForbidden; | 299 ScriptForbiddenIfMainThreadScope scriptForbidden; |
| 300 | 300 |
| 301 double startTime = WTF::currentTimeMS(); | 301 double startTime = WTF::currentTimeMS(); |
| 302 Address result = lazySweepPages(allocationSize, gcInfoIndex); | 302 Address result = lazySweepPages(allocationSize, gcInfoIndex); |
| 303 threadState()->accumulateSweepingTime(WTF::currentTimeMS() - startTime); | 303 getThreadState()->accumulateSweepingTime(WTF::currentTimeMS() - startTime); |
| 304 Heap::reportMemoryUsageForTracing(); | 304 Heap::reportMemoryUsageForTracing(); |
| 305 | 305 |
| 306 return result; | 306 return result; |
| 307 } | 307 } |
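
The comment above (lines 290-295) explains why lazy sweeping must not re-enter itself when a finalizer run during page->sweep() allocates. A minimal standalone sketch of that guard pattern, modeled on how ThreadState::SweepForbiddenScope is used above; the simplified Sweeper class and its members are illustrative only, not Blink's actual code:

    #include <cassert>

    // Simplified arena: lazySweep() bails out instead of recursing when a
    // finalizer triggered by sweeping allocates and re-enters lazySweep().
    class Sweeper {
    public:
        void* lazySweep() {
            if (m_sweepForbidden)
                return nullptr;                    // Already sweeping: do not recurse.
            SweepForbiddenScope forbidSweep(this); // Set the flag for this scope.
            return sweepPages();                   // May call finalizers that allocate.
        }

    private:
        // RAII scope that marks sweeping as in progress for its lifetime.
        class SweepForbiddenScope {
        public:
            explicit SweepForbiddenScope(Sweeper* sweeper) : m_sweeper(sweeper) {
                assert(!m_sweeper->m_sweepForbidden);
                m_sweeper->m_sweepForbidden = true;
            }
            ~SweepForbiddenScope() { m_sweeper->m_sweepForbidden = false; }
        private:
            Sweeper* m_sweeper;
        };

        void* sweepPages() { return nullptr; }     // Placeholder for the real page sweep.
        bool m_sweepForbidden = false;
    };
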
| 308 | 308 |
| 309 void BaseArena::sweepUnsweptPage() | 309 void BaseArena::sweepUnsweptPage() |
| 310 { | 310 { |
| 311 BasePage* page = m_firstUnsweptPage; | 311 BasePage* page = m_firstUnsweptPage; |
| 312 if (page->isEmpty()) { | 312 if (page->isEmpty()) { |
| 313 page->unlink(&m_firstUnsweptPage); | 313 page->unlink(&m_firstUnsweptPage); |
| 314 page->removeFromHeap(); | 314 page->removeFromHeap(); |
| 315 } else { | 315 } else { |
| 316 // Sweep a page and move the page from m_firstUnsweptPages to | 316 // Sweep a page and move the page from m_firstUnsweptPages to |
| 317 // m_firstPages. | 317 // m_firstPages. |
| 318 page->sweep(); | 318 page->sweep(); |
| 319 page->unlink(&m_firstUnsweptPage); | 319 page->unlink(&m_firstUnsweptPage); |
| 320 page->link(&m_firstPage); | 320 page->link(&m_firstPage); |
| 321 page->markAsSwept(); | 321 page->markAsSwept(); |
| 322 } | 322 } |
| 323 } | 323 } |
| 324 | 324 |
| 325 bool BaseArena::lazySweepWithDeadline(double deadlineSeconds) | 325 bool BaseArena::lazySweepWithDeadline(double deadlineSeconds) |
| 326 { | 326 { |
| 327 // It might be heavy to call Platform::current()->monotonicallyIncreasingTimeSeconds() | 327 // It might be heavy to call Platform::current()->monotonicallyIncreasingTimeSeconds() |
| 328 // per page (i.e., 128 KB sweep or one LargeObject sweep), so we check | 328 // per page (i.e., 128 KB sweep or one LargeObject sweep), so we check |
| 329 // the deadline per 10 pages. | 329 // the deadline per 10 pages. |
| 330 static const int deadlineCheckInterval = 10; | 330 static const int deadlineCheckInterval = 10; |
| 331 | 331 |
| 332 RELEASE_ASSERT(threadState()->isSweepingInProgress()); | 332 RELEASE_ASSERT(getThreadState()->isSweepingInProgress()); |
| 333 ASSERT(threadState()->sweepForbidden()); | 333 ASSERT(getThreadState()->sweepForbidden()); |
| 334 ASSERT(!threadState()->isMainThread() || ScriptForbiddenScope::isScriptForbidden()); | 334 ASSERT(!getThreadState()->isMainThread() || ScriptForbiddenScope::isScriptForbidden()); |
| 335 | 335 |
| 336 int pageCount = 1; | 336 int pageCount = 1; |
| 337 while (m_firstUnsweptPage) { | 337 while (m_firstUnsweptPage) { |
| 338 sweepUnsweptPage(); | 338 sweepUnsweptPage(); |
| 339 if (pageCount % deadlineCheckInterval == 0) { | 339 if (pageCount % deadlineCheckInterval == 0) { |
| 340 if (deadlineSeconds <= monotonicallyIncreasingTime()) { | 340 if (deadlineSeconds <= monotonicallyIncreasingTime()) { |
| 341 // Deadline has come. | 341 // Deadline has come. |
| 342 Heap::reportMemoryUsageForTracing(); | 342 Heap::reportMemoryUsageForTracing(); |
| 343 return !m_firstUnsweptPage; | 343 return !m_firstUnsweptPage; |
| 344 } | 344 } |
| 345 } | 345 } |
| 346 pageCount++; | 346 pageCount++; |
| 347 } | 347 } |
| 348 Heap::reportMemoryUsageForTracing(); | 348 Heap::reportMemoryUsageForTracing(); |
| 349 return true; | 349 return true; |
| 350 } | 350 } |
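
lazySweepWithDeadline() above polls the clock only once per deadlineCheckInterval pages, since a timer read per 128 KB page (or per large object) would add noticeable overhead. A hedged standalone sketch of the same pattern using std::chrono; the real code uses Platform's monotonic clock, and the int "page" here is a stand-in:

    #include <chrono>
    #include <deque>

    using Clock = std::chrono::steady_clock;

    // Sweep pages until either the queue is empty or the deadline passes.
    // Returns true only if every page was swept before the deadline.
    bool sweepWithDeadline(std::deque<int>& unsweptPages, Clock::time_point deadline)
    {
        static const int deadlineCheckInterval = 10; // Check the clock every 10 pages.
        int pageCount = 1;
        while (!unsweptPages.empty()) {
            unsweptPages.pop_front();                // Stand-in for sweeping one page.
            if (pageCount % deadlineCheckInterval == 0 && Clock::now() >= deadline)
                return unsweptPages.empty();         // Deadline hit: report progress so far.
            ++pageCount;
        }
        return true;
    }
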
| 351 | 351 |
| 352 void BaseArena::completeSweep() | 352 void BaseArena::completeSweep() |
| 353 { | 353 { |
| 354 RELEASE_ASSERT(threadState()->isSweepingInProgress()); | 354 RELEASE_ASSERT(getThreadState()->isSweepingInProgress()); |
| 355 ASSERT(threadState()->sweepForbidden()); | 355 ASSERT(getThreadState()->sweepForbidden()); |
| 356 ASSERT(!threadState()->isMainThread() || ScriptForbiddenScope::isScriptForbidden()); | 356 ASSERT(!getThreadState()->isMainThread() || ScriptForbiddenScope::isScriptForbidden()); |
| 357 | 357 |
| 358 while (m_firstUnsweptPage) { | 358 while (m_firstUnsweptPage) { |
| 359 sweepUnsweptPage(); | 359 sweepUnsweptPage(); |
| 360 } | 360 } |
| 361 Heap::reportMemoryUsageForTracing(); | 361 Heap::reportMemoryUsageForTracing(); |
| 362 } | 362 } |
| 363 | 363 |
| 364 NormalPageArena::NormalPageArena(ThreadState* state, int index) | 364 NormalPageArena::NormalPageArena(ThreadState* state, int index) |
| 365 : BaseArena(state, index) | 365 : BaseArena(state, index) |
| 366 , m_currentAllocationPoint(nullptr) | 366 , m_currentAllocationPoint(nullptr) |
| (...skipping 42 matching lines...) |
| 409 { | 409 { |
| 410 if (m_freeList.takeSnapshot(dumpName)) { | 410 if (m_freeList.takeSnapshot(dumpName)) { |
| 411 WebMemoryAllocatorDump* bucketsDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(dumpName + "/buckets"); | 411 WebMemoryAllocatorDump* bucketsDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(dumpName + "/buckets"); |
| 412 WebMemoryAllocatorDump* pagesDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(dumpName + "/pages"); | 412 WebMemoryAllocatorDump* pagesDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(dumpName + "/pages"); |
| 413 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOwnershipEdge(pagesDump->guid(), bucketsDump->guid()); | 413 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOwnershipEdge(pagesDump->guid(), bucketsDump->guid()); |
| 414 } | 414 } |
| 415 } | 415 } |
| 416 | 416 |
| 417 void NormalPageArena::allocatePage() | 417 void NormalPageArena::allocatePage() |
| 418 { | 418 { |
| 419 threadState()->shouldFlushHeapDoesNotContainCache(); | 419 getThreadState()->shouldFlushHeapDoesNotContainCache(); |
| 420 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(arenaIndex()); | 420 PageMemory* pageMemory = Heap::getFreePagePool()->takeFreePage(arenaIndex()); |
| 421 | 421 |
| 422 if (!pageMemory) { | 422 if (!pageMemory) { |
| 423 // Allocate a memory region for blinkPagesPerRegion pages that | 423 // Allocate a memory region for blinkPagesPerRegion pages that |
| 424 // will each have the following layout. | 424 // will each have the following layout. |
| 425 // | 425 // |
| 426 // [ guard os page | ... payload ... | guard os page ] | 426 // [ guard os page | ... payload ... | guard os page ] |
| 427 // ^---{ aligned to blink page size } | 427 // ^---{ aligned to blink page size } |
| 428 PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages(); | 428 PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages(); |
| 429 | 429 |
| 430 // Setup the PageMemory object for each of the pages in the region. | 430 // Setup the PageMemory object for each of the pages in the region. |
| 431 for (size_t i = 0; i < blinkPagesPerRegion; ++i) { | 431 for (size_t i = 0; i < blinkPagesPerRegion; ++i) { |
| 432 PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, i * blinkPageSize, blinkPagePayloadSize()); | 432 PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, i * blinkPageSize, blinkPagePayloadSize()); |
| 433 // Take the first possible page ensuring that this thread actually | 433 // Take the first possible page ensuring that this thread actually |
| 434 // gets a page and add the rest to the page pool. | 434 // gets a page and add the rest to the page pool. |
| 435 if (!pageMemory) { | 435 if (!pageMemory) { |
| 436 bool result = memory->commit(); | 436 bool result = memory->commit(); |
| 437 // If you hit the ASSERT, it will mean that you're hitting | 437 // If you hit the ASSERT, it will mean that you're hitting |
| 438 // the limit of the number of mmapped regions OS can support | 438 // the limit of the number of mmapped regions OS can support |
| 439 // (e.g., /proc/sys/vm/max_map_count in Linux). | 439 // (e.g., /proc/sys/vm/max_map_count in Linux). |
| 440 RELEASE_ASSERT(result); | 440 RELEASE_ASSERT(result); |
| 441 pageMemory = memory; | 441 pageMemory = memory; |
| 442 } else { | 442 } else { |
| 443 Heap::freePagePool()->addFreePage(arenaIndex(), memory); | 443 Heap::getFreePagePool()->addFreePage(arenaIndex(), memory); |
| 444 } | 444 } |
| 445 } | 445 } |
| 446 } | 446 } |
| 447 | 447 |
| 448 NormalPage* page = new (pageMemory->writableStart()) NormalPage(pageMemory, this); | 448 NormalPage* page = new (pageMemory->writableStart()) NormalPage(pageMemory, this); |
| 449 page->link(&m_firstPage); | 449 page->link(&m_firstPage); |
| 450 | 450 |
| 451 Heap::increaseAllocatedSpace(page->size()); | 451 Heap::increaseAllocatedSpace(page->size()); |
| 452 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 452 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
| 453 // Allow the following addToFreeList() to add the newly allocated memory | 453 // Allow the following addToFreeList() to add the newly allocated memory |
| (...skipping 13 matching lines...) |
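
The layout comment above (lines 423-427) shows each Blink page's payload bracketed by guard OS pages, so a stray write past the payload faults immediately. A rough POSIX-only sketch of that reservation scheme with mmap/mprotect; PageMemoryRegion does considerably more, and payloadSize is assumed here to be a multiple of the OS page size:

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cassert>
    #include <cstddef>

    // Reserve [ guard page | payload | guard page ] and commit only the payload.
    // Touching either guard page triggers a fault instead of silent corruption.
    void* reservePayloadWithGuards(size_t payloadSize)
    {
        const size_t osPageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
        void* base = mmap(nullptr, osPageSize + payloadSize + osPageSize,
                          PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        assert(base != MAP_FAILED);
        char* payload = static_cast<char*>(base) + osPageSize;
        // Make only the payload readable/writable; the guards stay PROT_NONE.
        int result = mprotect(payload, payloadSize, PROT_READ | PROT_WRITE);
        assert(result == 0);
        return payload;
    }
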
| 467 | 467 |
| 468 if (page->terminating()) { | 468 if (page->terminating()) { |
| 469 // The thread is shutting down and this page is being removed as a part | 469 // The thread is shutting down and this page is being removed as a part |
| 470 // of the thread local GC. In that case the object could be traced in | 470 // of the thread local GC. In that case the object could be traced in |
| 471 // the next global GC if there is a dangling pointer from a live thread | 471 // the next global GC if there is a dangling pointer from a live thread |
| 472 // heap to this dead thread heap. To guard against this, we put the | 472 // heap to this dead thread heap. To guard against this, we put the |
| 473 // page into the orphaned page pool and zap the page memory. This | 473 // page into the orphaned page pool and zap the page memory. This |
| 474 // ensures that tracing the dangling pointer in the next global GC just | 474 // ensures that tracing the dangling pointer in the next global GC just |
| 475 // crashes instead of causing use-after-frees. After the next global | 475 // crashes instead of causing use-after-frees. After the next global |
| 476 // GC, the orphaned pages are removed. | 476 // GC, the orphaned pages are removed. |
| 477 Heap::orphanedPagePool()->addOrphanedPage(arenaIndex(), page); | 477 Heap::getOrphanedPagePool()->addOrphanedPage(arenaIndex(), page); |
| 478 } else { | 478 } else { |
| 479 PageMemory* memory = page->storage(); | 479 PageMemory* memory = page->storage(); |
| 480 page->~NormalPage(); | 480 page->~NormalPage(); |
| 481 Heap::freePagePool()->addFreePage(arenaIndex(), memory); | 481 Heap::getFreePagePool()->addFreePage(arenaIndex(), memory); |
| 482 } | 482 } |
| 483 } | 483 } |
| 484 | 484 |
| 485 bool NormalPageArena::coalesce() | 485 bool NormalPageArena::coalesce() |
| 486 { | 486 { |
| 487 // Don't coalesce arenas if there are not enough promptly freed entries | 487 // Don't coalesce arenas if there are not enough promptly freed entries |
| 488 // to be coalesced. | 488 // to be coalesced. |
| 489 // | 489 // |
| 490 // FIXME: This threshold is determined just to optimize blink_perf | 490 // FIXME: This threshold is determined just to optimize blink_perf |
| 491 // benchmarks. Coalescing is very sensitive to the threshold and | 491 // benchmarks. Coalescing is very sensitive to the threshold and |
| 492 // we need further investigations on the coalescing scheme. | 492 // we need further investigations on the coalescing scheme. |
| 493 if (m_promptlyFreedSize < 1024 * 1024) | 493 if (m_promptlyFreedSize < 1024 * 1024) |
| 494 return false; | 494 return false; |
| 495 | 495 |
| 496 if (threadState()->sweepForbidden()) | 496 if (getThreadState()->sweepForbidden()) |
| 497 return false; | 497 return false; |
| 498 | 498 |
| 499 ASSERT(!hasCurrentAllocationArea()); | 499 ASSERT(!hasCurrentAllocationArea()); |
| 500 TRACE_EVENT0("blink_gc", "BaseArena::coalesce"); | 500 TRACE_EVENT0("blink_gc", "BaseArena::coalesce"); |
| 501 | 501 |
| 502 // Rebuild free lists. | 502 // Rebuild free lists. |
| 503 m_freeList.clear(); | 503 m_freeList.clear(); |
| 504 size_t freedSize = 0; | 504 size_t freedSize = 0; |
| 505 for (NormalPage* page = static_cast<NormalPage*>(m_firstPage); page; page = static_cast<NormalPage*>(page->next())) { | 505 for (NormalPage* page = static_cast<NormalPage*>(m_firstPage); page; page = static_cast<NormalPage*>(page->next())) { |
| 506 Address startOfGap = page->payload(); | 506 Address startOfGap = page->payload(); |
| (...skipping 29 matching lines...) |
| 536 if (startOfGap != headerAddress) | 536 if (startOfGap != headerAddress) |
| 537 addToFreeList(startOfGap, headerAddress - startOfGap); | 537 addToFreeList(startOfGap, headerAddress - startOfGap); |
| 538 | 538 |
| 539 headerAddress += size; | 539 headerAddress += size; |
| 540 startOfGap = headerAddress; | 540 startOfGap = headerAddress; |
| 541 } | 541 } |
| 542 | 542 |
| 543 if (startOfGap != page->payloadEnd()) | 543 if (startOfGap != page->payloadEnd()) |
| 544 addToFreeList(startOfGap, page->payloadEnd() - startOfGap); | 544 addToFreeList(startOfGap, page->payloadEnd() - startOfGap); |
| 545 } | 545 } |
| 546 threadState()->decreaseAllocatedObjectSize(freedSize); | 546 getThreadState()->decreaseAllocatedObjectSize(freedSize); |
| 547 ASSERT(m_promptlyFreedSize == freedSize); | 547 ASSERT(m_promptlyFreedSize == freedSize); |
| 548 m_promptlyFreedSize = 0; | 548 m_promptlyFreedSize = 0; |
| 549 return true; | 549 return true; |
| 550 } | 550 } |
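
coalesce() above rebuilds the free list by walking each page in address order and folding runs of free or promptly-freed blocks into single larger gaps. A compact standalone sketch of that merge step; Block is a stand-in for HeapObjectHeader, and the promptly-freed bookkeeping and ASan handling are omitted:

    #include <cstddef>
    #include <vector>

    struct Block {
        bool free;     // Free-list entry or promptly freed object.
        size_t size;
    };

    // Walk blocks in address order and merge adjacent free blocks into one
    // larger gap, mirroring how coalesce() turns runs of freed objects into
    // single free-list entries.
    std::vector<Block> coalesceFreeRuns(const std::vector<Block>& blocks)
    {
        std::vector<Block> result;
        size_t gap = 0;
        for (const Block& block : blocks) {
            if (block.free) {
                gap += block.size;                 // Extend the current free run.
                continue;
            }
            if (gap) {
                result.push_back({true, gap});     // Emit the merged gap.
                gap = 0;
            }
            result.push_back(block);               // Keep live blocks as-is.
        }
        if (gap)
            result.push_back({true, gap});
        return result;
    }
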
| 551 | 551 |
| 552 void NormalPageArena::promptlyFreeObject(HeapObjectHeader* header) | 552 void NormalPageArena::promptlyFreeObject(HeapObjectHeader* header) |
| 553 { | 553 { |
| 554 ASSERT(!threadState()->sweepForbidden()); | 554 ASSERT(!getThreadState()->sweepForbidden()); |
| 555 ASSERT(header->checkHeader()); | 555 ASSERT(header->checkHeader()); |
| 556 Address address = reinterpret_cast<Address>(header); | 556 Address address = reinterpret_cast<Address>(header); |
| 557 Address payload = header->payload(); | 557 Address payload = header->payload(); |
| 558 size_t size = header->size(); | 558 size_t size = header->size(); |
| 559 size_t payloadSize = header->payloadSize(); | 559 size_t payloadSize = header->payloadSize(); |
| 560 ASSERT(size > 0); | 560 ASSERT(size > 0); |
| 561 ASSERT(pageFromObject(address) == findPageFromAddress(address)); | 561 ASSERT(pageFromObject(address) == findPageFromAddress(address)); |
| 562 | 562 |
| 563 { | 563 { |
| 564 ThreadState::SweepForbiddenScope forbiddenScope(threadState()); | 564 ThreadState::SweepForbiddenScope forbiddenScope(getThreadState()); |
| 565 header->finalize(payload, payloadSize); | 565 header->finalize(payload, payloadSize); |
| 566 if (address + size == m_currentAllocationPoint) { | 566 if (address + size == m_currentAllocationPoint) { |
| 567 m_currentAllocationPoint = address; | 567 m_currentAllocationPoint = address; |
| 568 setRemainingAllocationSize(m_remainingAllocationSize + size); | 568 setRemainingAllocationSize(m_remainingAllocationSize + size); |
| 569 SET_MEMORY_INACCESSIBLE(address, size); | 569 SET_MEMORY_INACCESSIBLE(address, size); |
| 570 return; | 570 return; |
| 571 } | 571 } |
| 572 SET_MEMORY_INACCESSIBLE(payload, payloadSize); | 572 SET_MEMORY_INACCESSIBLE(payload, payloadSize); |
| 573 header->markPromptlyFreed(); | 573 header->markPromptlyFreed(); |
| 574 } | 574 } |
| (...skipping 79 matching lines...) |
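
promptlyFreeObject() above has a fast path: when the freed object is the block most recently handed out by the bump allocator, the allocation point is simply rewound. A minimal standalone bump-allocator sketch of that optimization; headers, finalization, and memory poisoning are omitted, and the class name is illustrative:

    #include <cstddef>

    // Minimal bump allocator: freeing the most recently allocated block just
    // rewinds the bump pointer; anything else would have to go to a free list.
    class BumpAllocator {
    public:
        BumpAllocator(char* buffer, size_t size)
            : m_point(buffer), m_remaining(size) {}

        char* allocate(size_t size) {
            if (size > m_remaining)
                return nullptr;
            char* result = m_point;
            m_point += size;
            m_remaining -= size;
            return result;
        }

        bool promptlyFree(char* address, size_t size) {
            if (address + size != m_point)
                return false;          // Not the last allocation; caller frees otherwise.
            m_point = address;         // Rewind the allocation point.
            m_remaining += size;
            return true;
        }

    private:
        char* m_point;
        size_t m_remaining;
    };
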
| 654 } | 654 } |
| 655 | 655 |
| 656 void NormalPageArena::setRemainingAllocationSize(size_t newRemainingAllocationSize) | 656 void NormalPageArena::setRemainingAllocationSize(size_t newRemainingAllocationSize) |
| 657 { | 657 { |
| 658 m_remainingAllocationSize = newRemainingAllocationSize; | 658 m_remainingAllocationSize = newRemainingAllocationSize; |
| 659 | 659 |
| 660 // Sync recorded allocated-object size: | 660 // Sync recorded allocated-object size: |
| 661 // - if previous alloc checkpoint is larger, allocation size has increased. | 661 // - if previous alloc checkpoint is larger, allocation size has increased. |
| 662 // - if smaller, a net reduction in size since last call to updateRemainingAllocationSize(). | 662 // - if smaller, a net reduction in size since last call to updateRemainingAllocationSize(). |
| 663 if (m_lastRemainingAllocationSize > m_remainingAllocationSize) | 663 if (m_lastRemainingAllocationSize > m_remainingAllocationSize) |
| 664 threadState()->increaseAllocatedObjectSize(m_lastRemainingAllocationSize - m_remainingAllocationSize); | 664 getThreadState()->increaseAllocatedObjectSize(m_lastRemainingAllocationSize - m_remainingAllocationSize); |
| 665 else if (m_lastRemainingAllocationSize != m_remainingAllocationSize) | 665 else if (m_lastRemainingAllocationSize != m_remainingAllocationSize) |
| 666 threadState()->decreaseAllocatedObjectSize(m_remainingAllocationSize - m_lastRemainingAllocationSize); | 666 getThreadState()->decreaseAllocatedObjectSize(m_remainingAllocationSize - m_lastRemainingAllocationSize); |
| 667 m_lastRemainingAllocationSize = m_remainingAllocationSize; | 667 m_lastRemainingAllocationSize = m_remainingAllocationSize; |
| 668 } | 668 } |
| 669 | 669 |
| 670 void NormalPageArena::updateRemainingAllocationSize() | 670 void NormalPageArena::updateRemainingAllocationSize() |
| 671 { | 671 { |
| 672 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { | 672 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { |
| 673 threadState()->increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); | 673 getThreadState()->increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); |
| 674 m_lastRemainingAllocationSize = remainingAllocationSize(); | 674 m_lastRemainingAllocationSize = remainingAllocationSize(); |
| 675 } | 675 } |
| 676 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); | 676 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); |
| 677 } | 677 } |
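
setRemainingAllocationSize() and updateRemainingAllocationSize() above keep the thread's allocated-object counter in sync by diffing the current remaining size against the last recorded checkpoint. A small standalone sketch of that delta bookkeeping; the counter here merely stands in for ThreadState's allocated-object size:

    #include <cstddef>

    // Keeps an external "allocated bytes" counter in sync with a bump arena by
    // diffing against the last recorded remaining size (a checkpoint).
    struct AllocationAccounting {
        size_t lastRemaining = 0;
        size_t allocatedObjectSize = 0;   // Stand-in for ThreadState's counter.

        void setRemaining(size_t newRemaining) {
            if (lastRemaining > newRemaining)
                allocatedObjectSize += lastRemaining - newRemaining;   // Net allocation.
            else if (lastRemaining != newRemaining)
                allocatedObjectSize -= newRemaining - lastRemaining;   // Net reduction.
            lastRemaining = newRemaining;
        }
    };
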
| 678 | 678 |
| 679 void NormalPageArena::setAllocationPoint(Address point, size_t size) | 679 void NormalPageArena::setAllocationPoint(Address point, size_t size) |
| 680 { | 680 { |
| 681 #if ENABLE(ASSERT) | 681 #if ENABLE(ASSERT) |
| 682 if (point) { | 682 if (point) { |
| 683 ASSERT(size); | 683 ASSERT(size); |
| (...skipping 12 matching lines...) |
| 696 | 696 |
| 697 Address NormalPageArena::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex) | 697 Address NormalPageArena::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex) |
| 698 { | 698 { |
| 699 ASSERT(allocationSize > remainingAllocationSize()); | 699 ASSERT(allocationSize > remainingAllocationSize()); |
| 700 ASSERT(allocationSize >= allocationGranularity); | 700 ASSERT(allocationSize >= allocationGranularity); |
| 701 | 701 |
| 702 // 1. If this allocation is big enough, allocate a large object. | 702 // 1. If this allocation is big enough, allocate a large object. |
| 703 if (allocationSize >= largeObjectSizeThreshold) { | 703 if (allocationSize >= largeObjectSizeThreshold) { |
| 704 // TODO(sof): support eagerly finalized large objects, if ever needed. | 704 // TODO(sof): support eagerly finalized large objects, if ever needed. |
| 705 RELEASE_ASSERT(arenaIndex() != BlinkGC::EagerSweepArenaIndex); | 705 RELEASE_ASSERT(arenaIndex() != BlinkGC::EagerSweepArenaIndex); |
| 706 LargeObjectArena* largeObjectArena = static_cast<LargeObjectArena*>(threadState()->arena(BlinkGC::LargeObjectArenaIndex)); | 706 LargeObjectArena* largeObjectArena = static_cast<LargeObjectArena*>(getThreadState()->arena(BlinkGC::LargeObjectArenaIndex)); |
| 707 Address largeObject = largeObjectArena->allocateLargeObjectPage(allocationSize, gcInfoIndex); | 707 Address largeObject = largeObjectArena->allocateLargeObjectPage(allocationSize, gcInfoIndex); |
| 708 ASAN_MARK_LARGE_VECTOR_CONTAINER(this, largeObject); | 708 ASAN_MARK_LARGE_VECTOR_CONTAINER(this, largeObject); |
| 709 return largeObject; | 709 return largeObject; |
| 710 } | 710 } |
| 711 | 711 |
| 712 // 2. Try to allocate from a free list. | 712 // 2. Try to allocate from a free list. |
| 713 updateRemainingAllocationSize(); | 713 updateRemainingAllocationSize(); |
| 714 Address result = allocateFromFreeList(allocationSize, gcInfoIndex); | 714 Address result = allocateFromFreeList(allocationSize, gcInfoIndex); |
| 715 if (result) | 715 if (result) |
| 716 return result; | 716 return result; |
| 717 | 717 |
| 718 // 3. Reset the allocation point. | 718 // 3. Reset the allocation point. |
| 719 setAllocationPoint(nullptr, 0); | 719 setAllocationPoint(nullptr, 0); |
| 720 | 720 |
| 721 // 4. Lazily sweep pages of this heap until we find a freed area for | 721 // 4. Lazily sweep pages of this heap until we find a freed area for |
| 722 // this allocation or we finish sweeping all pages of this heap. | 722 // this allocation or we finish sweeping all pages of this heap. |
| 723 result = lazySweep(allocationSize, gcInfoIndex); | 723 result = lazySweep(allocationSize, gcInfoIndex); |
| 724 if (result) | 724 if (result) |
| 725 return result; | 725 return result; |
| 726 | 726 |
| 727 // 5. Coalesce promptly freed areas and then try to allocate from a free | 727 // 5. Coalesce promptly freed areas and then try to allocate from a free |
| 728 // list. | 728 // list. |
| 729 if (coalesce()) { | 729 if (coalesce()) { |
| 730 result = allocateFromFreeList(allocationSize, gcInfoIndex); | 730 result = allocateFromFreeList(allocationSize, gcInfoIndex); |
| 731 if (result) | 731 if (result) |
| 732 return result; | 732 return result; |
| 733 } | 733 } |
| 734 | 734 |
| 735 // 6. Complete sweeping. | 735 // 6. Complete sweeping. |
| 736 threadState()->completeSweep(); | 736 getThreadState()->completeSweep(); |
| 737 | 737 |
| 738 // 7. Check if we should trigger a GC. | 738 // 7. Check if we should trigger a GC. |
| 739 threadState()->scheduleGCIfNeeded(); | 739 getThreadState()->scheduleGCIfNeeded(); |
| 740 | 740 |
| 741 // 8. Add a new page to this heap. | 741 // 8. Add a new page to this heap. |
| 742 allocatePage(); | 742 allocatePage(); |
| 743 | 743 |
| 744 // 9. Try to allocate from a free list. This allocation must succeed. | 744 // 9. Try to allocate from a free list. This allocation must succeed. |
| 745 result = allocateFromFreeList(allocationSize, gcInfoIndex); | 745 result = allocateFromFreeList(allocationSize, gcInfoIndex); |
| 746 RELEASE_ASSERT(result); | 746 RELEASE_ASSERT(result); |
| 747 return result; | 747 return result; |
| 748 } | 748 } |
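
outOfLineAllocate() above is a cascade of progressively more expensive attempts. A compressed sketch of just that control flow, with every step stubbed out; the large-object handoff in step 1 and the gcInfoIndex plumbing are omitted, and the SlowPathArena class is illustrative, not Blink's:

    #include <cstddef>

    using Address = char*;

    // Caricature of the slow path: each helper is a stub standing in for the
    // corresponding NormalPageArena step shown above.
    struct SlowPathArena {
        Address allocateFromFreeList(size_t) { return nullptr; }
        Address lazySweep(size_t)            { return nullptr; }
        bool    coalesce()                   { return false; }
        void    completeSweep()              {}
        void    scheduleGCIfNeeded()         {}
        void    allocatePage()               {}

        Address outOfLineAllocate(size_t size) {
            if (Address result = allocateFromFreeList(size))   // 2. free list
                return result;
            if (Address result = lazySweep(size))              // 4. lazy sweep
                return result;
            if (coalesce()) {                                   // 5. coalesce promptly freed areas
                if (Address result = allocateFromFreeList(size))
                    return result;
            }
            completeSweep();                                    // 6. finish sweeping
            scheduleGCIfNeeded();                               // 7. maybe schedule a GC
            allocatePage();                                     // 8. grow the arena
            return allocateFromFreeList(size);                  // 9. must succeed now
        }
    };
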
| 749 | 749 |
| (...skipping 40 matching lines...) |
| 790 ASSERT(!(allocationSize & allocationMask)); | 790 ASSERT(!(allocationSize & allocationMask)); |
| 791 | 791 |
| 792 // 1. Try to sweep large objects more than allocationSize bytes | 792 // 1. Try to sweep large objects more than allocationSize bytes |
| 793 // before allocating a new large object. | 793 // before allocating a new large object. |
| 794 Address result = lazySweep(allocationSize, gcInfoIndex); | 794 Address result = lazySweep(allocationSize, gcInfoIndex); |
| 795 if (result) | 795 if (result) |
| 796 return result; | 796 return result; |
| 797 | 797 |
| 798 // 2. If we have failed in sweeping allocationSize bytes, | 798 // 2. If we have failed in sweeping allocationSize bytes, |
| 799 // we complete sweeping before allocating this large object. | 799 // we complete sweeping before allocating this large object. |
| 800 threadState()->completeSweep(); | 800 getThreadState()->completeSweep(); |
| 801 | 801 |
| 802 // 3. Check if we should trigger a GC. | 802 // 3. Check if we should trigger a GC. |
| 803 threadState()->scheduleGCIfNeeded(); | 803 getThreadState()->scheduleGCIfNeeded(); |
| 804 | 804 |
| 805 return doAllocateLargeObjectPage(allocationSize, gcInfoIndex); | 805 return doAllocateLargeObjectPage(allocationSize, gcInfoIndex); |
| 806 } | 806 } |
| 807 | 807 |
| 808 Address LargeObjectArena::doAllocateLargeObjectPage(size_t allocationSize, size_t gcInfoIndex) | 808 Address LargeObjectArena::doAllocateLargeObjectPage(size_t allocationSize, size_t gcInfoIndex) |
| 809 { | 809 { |
| 810 size_t largeObjectSize = LargeObjectPage::pageHeaderSize() + allocationSize; | 810 size_t largeObjectSize = LargeObjectPage::pageHeaderSize() + allocationSize; |
| 811 // If ASan is supported we add allocationGranularity bytes to the allocated | 811 // If ASan is supported we add allocationGranularity bytes to the allocated |
| 812 // space and poison that to detect overflows | 812 // space and poison that to detect overflows |
| 813 #if defined(ADDRESS_SANITIZER) | 813 #if defined(ADDRESS_SANITIZER) |
| 814 largeObjectSize += allocationGranularity; | 814 largeObjectSize += allocationGranularity; |
| 815 #endif | 815 #endif |
| 816 | 816 |
| 817 threadState()->shouldFlushHeapDoesNotContainCache(); | 817 getThreadState()->shouldFlushHeapDoesNotContainCache(); |
| 818 PageMemory* pageMemory = PageMemory::allocate(largeObjectSize); | 818 PageMemory* pageMemory = PageMemory::allocate(largeObjectSize); |
| 819 Address largeObjectAddress = pageMemory->writableStart(); | 819 Address largeObjectAddress = pageMemory->writableStart(); |
| 820 Address headerAddress = largeObjectAddress + LargeObjectPage::pageHeaderSize(); | 820 Address headerAddress = largeObjectAddress + LargeObjectPage::pageHeaderSize(); |
| 821 #if ENABLE(ASSERT) | 821 #if ENABLE(ASSERT) |
| 822 // Verify that the allocated PageMemory is expectedly zeroed. | 822 // Verify that the allocated PageMemory is expectedly zeroed. |
| 823 for (size_t i = 0; i < largeObjectSize; ++i) | 823 for (size_t i = 0; i < largeObjectSize; ++i) |
| 824 ASSERT(!largeObjectAddress[i]); | 824 ASSERT(!largeObjectAddress[i]); |
| 825 #endif | 825 #endif |
| 826 ASSERT(gcInfoIndex > 0); | 826 ASSERT(gcInfoIndex > 0); |
| 827 HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex); | 827 HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex); |
| 828 Address result = headerAddress + sizeof(*header); | 828 Address result = headerAddress + sizeof(*header); |
| 829 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 829 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
| 830 LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(pageMemory, this, allocationSize); | 830 LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(pageMemory, this, allocationSize); |
| 831 ASSERT(header->checkHeader()); | 831 ASSERT(header->checkHeader()); |
| 832 | 832 |
| 833 // Poison the object header and allocationGranularity bytes after the object | 833 // Poison the object header and allocationGranularity bytes after the object |
| 834 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); | 834 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); |
| 835 ASAN_POISON_MEMORY_REGION(largeObject->getAddress() + largeObject->size(), allocationGranularity); | 835 ASAN_POISON_MEMORY_REGION(largeObject->getAddress() + largeObject->size(), allocationGranularity); |
| 836 | 836 |
| 837 largeObject->link(&m_firstPage); | 837 largeObject->link(&m_firstPage); |
| 838 | 838 |
| 839 Heap::increaseAllocatedSpace(largeObject->size()); | 839 Heap::increaseAllocatedSpace(largeObject->size()); |
| 840 threadState()->increaseAllocatedObjectSize(largeObject->size()); | 840 getThreadState()->increaseAllocatedObjectSize(largeObject->size()); |
| 841 return result; | 841 return result; |
| 842 } | 842 } |
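
doAllocateLargeObjectPage() poisons the header and the extra allocationGranularity bytes appended after the object, so that under ASan an overflow past the payload is reported instead of silently corrupting adjacent memory. A hedged sketch of redzone poisoning through the public ASan interface; the detection macros vary by compiler, so the guards below are an assumption, not Blink's actual ASAN_POISON_MEMORY_REGION definition:

    #include <cstddef>
    #include <cstdlib>

    #if defined(__SANITIZE_ADDRESS__) || defined(ADDRESS_SANITIZER)
    #include <sanitizer/asan_interface.h>
    #define POISON(addr, size)   __asan_poison_memory_region(addr, size)
    #define UNPOISON(addr, size) __asan_unpoison_memory_region(addr, size)
    #else
    #define POISON(addr, size)   ((void)(addr), (void)(size))
    #define UNPOISON(addr, size) ((void)(addr), (void)(size))
    #endif

    // Allocate payloadSize bytes followed by a poisoned redzone; a write into
    // the redzone is reported by ASan. The redzone must be unpoisoned again
    // before the block is released back to the allocator.
    char* allocateWithRedzone(size_t payloadSize, size_t redzoneSize)
    {
        char* block = static_cast<char*>(std::malloc(payloadSize + redzoneSize));
        POISON(block + payloadSize, redzoneSize);
        return block;
    }
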
| 843 | 843 |
| 844 void LargeObjectArena::freeLargeObjectPage(LargeObjectPage* object) | 844 void LargeObjectArena::freeLargeObjectPage(LargeObjectPage* object) |
| 845 { | 845 { |
| 846 ASAN_UNPOISON_MEMORY_REGION(object->payload(), object->payloadSize()); | 846 ASAN_UNPOISON_MEMORY_REGION(object->payload(), object->payloadSize()); |
| 847 object->heapObjectHeader()->finalize(object->payload(), object->payloadSize()); | 847 object->heapObjectHeader()->finalize(object->payload(), object->payloadSize()); |
| 848 Heap::decreaseAllocatedSpace(object->size()); | 848 Heap::decreaseAllocatedSpace(object->size()); |
| 849 | 849 |
| 850 // Unpoison the object header and allocationGranularity bytes after the | 850 // Unpoison the object header and allocationGranularity bytes after the |
| 851 // object before freeing. | 851 // object before freeing. |
| 852 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(HeapObjectHeader)); | 852 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(HeapObjectHeader)); |
| 853 ASAN_UNPOISON_MEMORY_REGION(object->getAddress() + object->size(), allocationGranularity); | 853 ASAN_UNPOISON_MEMORY_REGION(object->getAddress() + object->size(), allocationGranularity); |
| 854 | 854 |
| 855 if (object->terminating()) { | 855 if (object->terminating()) { |
| 856 ASSERT(ThreadState::current()->isTerminating()); | 856 ASSERT(ThreadState::current()->isTerminating()); |
| 857 // The thread is shutting down and this page is being removed as a part | 857 // The thread is shutting down and this page is being removed as a part |
| 858 // of the thread local GC. In that case the object could be traced in | 858 // of the thread local GC. In that case the object could be traced in |
| 859 // the next global GC if there is a dangling pointer from a live thread | 859 // the next global GC if there is a dangling pointer from a live thread |
| 860 // heap to this dead thread heap. To guard against this, we put the | 860 // heap to this dead thread heap. To guard against this, we put the |
| 861 // page into the orphaned page pool and zap the page memory. This | 861 // page into the orphaned page pool and zap the page memory. This |
| 862 // ensures that tracing the dangling pointer in the next global GC just | 862 // ensures that tracing the dangling pointer in the next global GC just |
| 863 // crashes instead of causing use-after-frees. After the next global | 863 // crashes instead of causing use-after-frees. After the next global |
| 864 // GC, the orphaned pages are removed. | 864 // GC, the orphaned pages are removed. |
| 865 Heap::orphanedPagePool()->addOrphanedPage(arenaIndex(), object); | 865 Heap::getOrphanedPagePool()->addOrphanedPage(arenaIndex(), object); |
| 866 } else { | 866 } else { |
| 867 ASSERT(!ThreadState::current()->isTerminating()); | 867 ASSERT(!ThreadState::current()->isTerminating()); |
| 868 PageMemory* memory = object->storage(); | 868 PageMemory* memory = object->storage(); |
| 869 object->~LargeObjectPage(); | 869 object->~LargeObjectPage(); |
| 870 delete memory; | 870 delete memory; |
| 871 } | 871 } |
| 872 } | 872 } |
| 873 | 873 |
| 874 Address LargeObjectArena::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) | 874 Address LargeObjectArena::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) |
| 875 { | 875 { |
| (...skipping 294 matching lines...) |
| 1170 } | 1170 } |
| 1171 if (startOfGap != payloadEnd()) { | 1171 if (startOfGap != payloadEnd()) { |
| 1172 pageArena->addToFreeList(startOfGap, payloadEnd() - startOfGap); | 1172 pageArena->addToFreeList(startOfGap, payloadEnd() - startOfGap); |
| 1173 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) | 1173 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) |
| 1174 if (Heap::isLowEndDevice()) | 1174 if (Heap::isLowEndDevice()) |
| 1175 discardPages(startOfGap + sizeof(FreeListEntry), payloadEnd()); | 1175 discardPages(startOfGap + sizeof(FreeListEntry), payloadEnd()); |
| 1176 #endif | 1176 #endif |
| 1177 } | 1177 } |
| 1178 | 1178 |
| 1179 if (markedObjectSize) | 1179 if (markedObjectSize) |
| 1180 pageArena->threadState()->increaseMarkedObjectSize(markedObjectSize); | 1180 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); |
| 1181 } | 1181 } |
| 1182 | 1182 |
| 1183 void NormalPage::makeConsistentForGC() | 1183 void NormalPage::makeConsistentForGC() |
| 1184 { | 1184 { |
| 1185 size_t markedObjectSize = 0; | 1185 size_t markedObjectSize = 0; |
| 1186 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | 1186 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
| 1187 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1187 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 1188 ASSERT(header->size() < blinkPagePayloadSize()); | 1188 ASSERT(header->size() < blinkPagePayloadSize()); |
| 1189 // Check if a free list entry first since we cannot call | 1189 // Check if a free list entry first since we cannot call |
| 1190 // isMarked on a free list entry. | 1190 // isMarked on a free list entry. |
| 1191 if (header->isFree()) { | 1191 if (header->isFree()) { |
| 1192 headerAddress += header->size(); | 1192 headerAddress += header->size(); |
| 1193 continue; | 1193 continue; |
| 1194 } | 1194 } |
| 1195 ASSERT(header->checkHeader()); | 1195 ASSERT(header->checkHeader()); |
| 1196 if (header->isMarked()) { | 1196 if (header->isMarked()) { |
| 1197 header->unmark(); | 1197 header->unmark(); |
| 1198 markedObjectSize += header->size(); | 1198 markedObjectSize += header->size(); |
| 1199 } else { | 1199 } else { |
| 1200 header->markDead(); | 1200 header->markDead(); |
| 1201 } | 1201 } |
| 1202 headerAddress += header->size(); | 1202 headerAddress += header->size(); |
| 1203 } | 1203 } |
| 1204 if (markedObjectSize) | 1204 if (markedObjectSize) |
| 1205 arenaForNormalPage()->threadState()->increaseMarkedObjectSize(markedObjectSize); | 1205 arenaForNormalPage()->getThreadState()->increaseMarkedObjectSize(markedObjectSize); |
| 1206 } | 1206 } |
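
makeConsistentForGC() above normalizes mark bits between collections: marked (live) objects are unmarked and their size is counted, while unmarked objects are flagged dead for the sweeper. A tiny standalone sketch of that state transition; Header and ObjectState are illustrative stand-ins for HeapObjectHeader and its flag bits, and free-list entries are ignored:

    #include <cstddef>
    #include <vector>

    enum class ObjectState { Unmarked, Marked, Dead };

    struct Header {
        ObjectState state;
        size_t size;
    };

    // Reset mark bits between GCs: survivors become Unmarked (and are counted),
    // everything that was not reached becomes Dead so the sweeper can reclaim it.
    size_t makeConsistentForGC(std::vector<Header>& headers)
    {
        size_t markedObjectSize = 0;
        for (Header& header : headers) {
            if (header.state == ObjectState::Marked) {
                header.state = ObjectState::Unmarked;
                markedObjectSize += header.size;
            } else if (header.state == ObjectState::Unmarked) {
                header.state = ObjectState::Dead;
            }
        }
        return markedObjectSize;
    }
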
| 1207 | 1207 |
| 1208 void NormalPage::makeConsistentForMutator() | 1208 void NormalPage::makeConsistentForMutator() |
| 1209 { | 1209 { |
| 1210 Address startOfGap = payload(); | 1210 Address startOfGap = payload(); |
| 1211 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | 1211 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
| 1212 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1212 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 1213 size_t size = header->size(); | 1213 size_t size = header->size(); |
| 1214 ASSERT(size < blinkPagePayloadSize()); | 1214 ASSERT(size < blinkPagePayloadSize()); |
| 1215 if (header->isPromptlyFreed()) | 1215 if (header->isPromptlyFreed()) |
| (...skipping 243 matching lines...) |
| 1459 } | 1459 } |
| 1460 | 1460 |
| 1461 void LargeObjectPage::removeFromHeap() | 1461 void LargeObjectPage::removeFromHeap() |
| 1462 { | 1462 { |
| 1463 static_cast<LargeObjectArena*>(arena())->freeLargeObjectPage(this); | 1463 static_cast<LargeObjectArena*>(arena())->freeLargeObjectPage(this); |
| 1464 } | 1464 } |
| 1465 | 1465 |
| 1466 void LargeObjectPage::sweep() | 1466 void LargeObjectPage::sweep() |
| 1467 { | 1467 { |
| 1468 heapObjectHeader()->unmark(); | 1468 heapObjectHeader()->unmark(); |
| 1469 arena()->threadState()->increaseMarkedObjectSize(size()); | 1469 arena()->getThreadState()->increaseMarkedObjectSize(size()); |
| 1470 } | 1470 } |
| 1471 | 1471 |
| 1472 void LargeObjectPage::makeConsistentForGC() | 1472 void LargeObjectPage::makeConsistentForGC() |
| 1473 { | 1473 { |
| 1474 HeapObjectHeader* header = heapObjectHeader(); | 1474 HeapObjectHeader* header = heapObjectHeader(); |
| 1475 if (header->isMarked()) { | 1475 if (header->isMarked()) { |
| 1476 header->unmark(); | 1476 header->unmark(); |
| 1477 arena()->threadState()->increaseMarkedObjectSize(size()); | 1477 arena()->getThreadState()->increaseMarkedObjectSize(size()); |
| 1478 } else { | 1478 } else { |
| 1479 header->markDead(); | 1479 header->markDead(); |
| 1480 } | 1480 } |
| 1481 } | 1481 } |
| 1482 | 1482 |
| 1483 void LargeObjectPage::makeConsistentForMutator() | 1483 void LargeObjectPage::makeConsistentForMutator() |
| 1484 { | 1484 { |
| 1485 HeapObjectHeader* header = heapObjectHeader(); | 1485 HeapObjectHeader* header = heapObjectHeader(); |
| 1486 if (header->isMarked()) | 1486 if (header->isMarked()) |
| 1487 header->unmark(); | 1487 header->unmark(); |
| (...skipping 100 matching lines...) |
| 1588 | 1588 |
| 1589 m_hasEntries = true; | 1589 m_hasEntries = true; |
| 1590 size_t index = hash(address); | 1590 size_t index = hash(address); |
| 1591 ASSERT(!(index & 1)); | 1591 ASSERT(!(index & 1)); |
| 1592 Address cachePage = roundToBlinkPageStart(address); | 1592 Address cachePage = roundToBlinkPageStart(address); |
| 1593 m_entries[index + 1] = m_entries[index]; | 1593 m_entries[index + 1] = m_entries[index]; |
| 1594 m_entries[index] = cachePage; | 1594 m_entries[index] = cachePage; |
| 1595 } | 1595 } |
| 1596 | 1596 |
| 1597 } // namespace blink | 1597 } // namespace blink |