Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(809)

Side by Side Diff: Source/platform/heap/Heap.cpp

Issue 747363005: Oilpan: Introduce a state transition model for Oilpan GC states (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « Source/platform/heap/Heap.h ('k') | Source/platform/heap/HeapTest.cpp » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 409 matching lines...) Expand 10 before | Expand all | Expand 10 after
420 TRACE_EVENT0("blink_gc", "Heap::GCScope"); 420 TRACE_EVENT0("blink_gc", "Heap::GCScope");
421 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); 421 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE();
422 if (m_state->isMainThread()) 422 if (m_state->isMainThread())
423 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); 423 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting");
424 424
425 m_state->checkThread(); 425 m_state->checkThread();
426 426
427 // FIXME: in an unlikely coincidence that two threads decide 427 // FIXME: in an unlikely coincidence that two threads decide
428 // to collect garbage at the same time, avoid doing two GCs in 428 // to collect garbage at the same time, avoid doing two GCs in
429 // a row. 429 // a row.
430 RELEASE_ASSERT(!m_state->isInGC());
431 RELEASE_ASSERT(!m_state->isSweepInProgress());
432 if (LIKELY(ThreadState::stopThreads())) { 430 if (LIKELY(ThreadState::stopThreads())) {
433 m_parkedAllThreads = true; 431 m_parkedAllThreads = true;
434 m_state->enterGC();
435 } 432 }
436 if (m_state->isMainThread()) 433 if (m_state->isMainThread())
437 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); 434 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState);
438 } 435 }
439 436
440 bool allThreadsParked() { return m_parkedAllThreads; } 437 bool allThreadsParked() { return m_parkedAllThreads; }
441 438
442 ~GCScope() 439 ~GCScope()
443 { 440 {
444 // Only cleanup if we parked all threads in which case the GC happened 441 // Only cleanup if we parked all threads in which case the GC happened
445 // and we need to resume the other threads. 442 // and we need to resume the other threads.
446 if (LIKELY(m_parkedAllThreads)) { 443 if (LIKELY(m_parkedAllThreads)) {
447 m_state->leaveGC();
448 ASSERT(!m_state->isInGC());
449 ThreadState::resumeThreads(); 444 ThreadState::resumeThreads();
450 } 445 }
451 } 446 }
452 447
453 private: 448 private:
454 ThreadState* m_state; 449 ThreadState* m_state;
455 ThreadState::SafePointScope m_safePointScope; 450 ThreadState::SafePointScope m_safePointScope;
456 bool m_parkedAllThreads; // False if we fail to park all threads 451 bool m_parkedAllThreads; // False if we fail to park all threads
457 }; 452 };
458 453
(...skipping 227 matching lines...) Expand 10 before | Expand all | Expand 10 after
686 { 681 {
687 ASSERT(allocationSize > remainingAllocationSize()); 682 ASSERT(allocationSize > remainingAllocationSize());
688 if (allocationSize > blinkPageSize / 2) 683 if (allocationSize > blinkPageSize / 2)
689 return allocateLargeObject(allocationSize, gcInfo); 684 return allocateLargeObject(allocationSize, gcInfo);
690 685
691 updateRemainingAllocationSize(); 686 updateRemainingAllocationSize();
692 if (threadState()->shouldGC()) { 687 if (threadState()->shouldGC()) {
693 if (threadState()->shouldForceConservativeGC()) 688 if (threadState()->shouldForceConservativeGC())
694 Heap::collectGarbage(ThreadState::HeapPointersOnStack); 689 Heap::collectGarbage(ThreadState::HeapPointersOnStack);
695 else 690 else
696 threadState()->setGCRequested(); 691 threadState()->requestGC();
697 } 692 }
698 ensureCurrentAllocation(allocationSize, gcInfo); 693 ensureCurrentAllocation(allocationSize, gcInfo);
699 return allocate(payloadSize, gcInfo); 694 return allocate(payloadSize, gcInfo);
700 } 695 }
701 696
702 template<typename Header> 697 template<typename Header>
703 bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize) 698 bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize)
704 { 699 {
705 ASSERT(!hasCurrentAllocationArea()); 700 ASSERT(!hasCurrentAllocationArea());
706 size_t bucketSize = 1 << m_freeList.m_biggestFreeListIndex; 701 size_t bucketSize = 1 << m_freeList.m_biggestFreeListIndex;
(...skipping 314 matching lines...) Expand 10 before | Expand all | Expand 10 after
1021 allocationSize += headerPadding<Header>(); 1016 allocationSize += headerPadding<Header>();
1022 1017
1023 // If ASan is supported we add allocationGranularity bytes to the allocated space and 1018 // If ASan is supported we add allocationGranularity bytes to the allocated space and
1024 // poison that to detect overflows 1019 // poison that to detect overflows
1025 #if defined(ADDRESS_SANITIZER) 1020 #if defined(ADDRESS_SANITIZER)
1026 allocationSize += allocationGranularity; 1021 allocationSize += allocationGranularity;
1027 #endif 1022 #endif
1028 1023
1029 updateRemainingAllocationSize(); 1024 updateRemainingAllocationSize();
1030 if (m_threadState->shouldGC()) 1025 if (m_threadState->shouldGC())
1031 m_threadState->setGCRequested(); 1026 m_threadState->requestGC();
1032 m_threadState->shouldFlushHeapDoesNotContainCache(); 1027 m_threadState->shouldFlushHeapDoesNotContainCache();
1033 PageMemory* pageMemory = PageMemory::allocate(allocationSize); 1028 PageMemory* pageMemory = PageMemory::allocate(allocationSize);
1034 m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region()); 1029 m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region());
1035 Address largeObjectAddress = pageMemory->writableStart(); 1030 Address largeObjectAddress = pageMemory->writableStart();
1036 Address headerAddress = largeObjectAddress + sizeof(LargeObject<Header>) + headerPadding<Header>(); 1031 Address headerAddress = largeObjectAddress + sizeof(LargeObject<Header>) + headerPadding<Header>();
1037 memset(headerAddress, 0, size); 1032 memset(headerAddress, 0, size);
1038 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); 1033 Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
1039 Address result = headerAddress + sizeof(*header); 1034 Address result = headerAddress + sizeof(*header);
1040 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); 1035 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
1041 LargeObject<Header>* largeObject = new (largeObjectAddress) LargeObject<Header>(pageMemory, gcInfo, threadState()); 1036 LargeObject<Header>* largeObject = new (largeObjectAddress) LargeObject<Header>(pageMemory, gcInfo, threadState());
(...skipping 1377 matching lines...) Expand 10 before | Expand all | Expand 10 after
2419 } 2414 }
2420 2415
2421 #if ENABLE(ASSERT) 2416 #if ENABLE(ASSERT)
2422 bool Heap::weakTableRegistered(const void* table) 2417 bool Heap::weakTableRegistered(const void* table)
2423 { 2418 {
2424 ASSERT(s_ephemeronStack); 2419 ASSERT(s_ephemeronStack);
2425 return s_ephemeronStack->hasCallbackForObject(table); 2420 return s_ephemeronStack->hasCallbackForObject(table);
2426 } 2421 }
2427 #endif 2422 #endif
2428 2423
2429 void Heap::prepareForGC() 2424 void Heap::preGC()
2430 { 2425 {
2431 ASSERT(Heap::isInGC()); 2426 ASSERT(Heap::isInGC());
2432 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); 2427 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2433 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) 2428 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
2434 (*it)->prepareForGC(); 2429 (*it)->preGC();
2430 }
2431
2432 void Heap::postGC()
2433 {
2434 ASSERT(Heap::isInGC());
2435 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2436 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
2437 (*it)->postGC();
2435 } 2438 }
2436 2439
2437 void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::CauseOfGC cause) 2440 void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::CauseOfGC cause)
2438 { 2441 {
2439 ThreadState* state = ThreadState::current(); 2442 ThreadState* state = ThreadState::current();
2440 state->clearGCRequested(); 2443 state->setGCState(ThreadState::StoppingOtherThreads);
2441 2444
2442 GCScope gcScope(stackState); 2445 GCScope gcScope(stackState);
2443 // Check if we successfully parked the other threads. If not we bail out of the GC. 2446 // Check if we successfully parked the other threads. If not we bail out of the GC.
2444 if (!gcScope.allThreadsParked()) { 2447 if (!gcScope.allThreadsParked()) {
2445 ThreadState::current()->setGCRequested(); 2448 state->requestGC();
2446 return; 2449 return;
2447 } 2450 }
2448 2451
2449 if (state->isMainThread()) 2452 if (state->isMainThread())
2450 ScriptForbiddenScope::enter(); 2453 ScriptForbiddenScope::enter();
2451 2454
2452 s_lastGCWasConservative = false; 2455 s_lastGCWasConservative = false;
2453 2456
2454 TRACE_EVENT2("blink_gc", "Heap::collectGarbage", 2457 TRACE_EVENT2("blink_gc", "Heap::collectGarbage",
2455 "precise", stackState == ThreadState::NoHeapPointersOnStack, 2458 "precise", stackState == ThreadState::NoHeapPointersOnStack,
2456 "forced", cause == ThreadState::ForcedGC); 2459 "forced", cause == ThreadState::ForcedGC);
2457 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); 2460 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC");
2458 double timeStamp = WTF::currentTimeMS(); 2461 double timeStamp = WTF::currentTimeMS();
2459 #if ENABLE(GC_PROFILE_MARKING) 2462 #if ENABLE(GC_PROFILE_MARKING)
2460 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear(); 2463 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear();
2461 #endif 2464 #endif
2462 2465
2463 // Disallow allocation during garbage collection (but not 2466 // Disallow allocation during garbage collection (but not
2464 // during the finalization that happens when the gcScope is 2467 // during the finalization that happens when the gcScope is
2465 // torn down). 2468 // torn down).
2466 NoAllocationScope<AnyThread> noAllocationScope; 2469 NoAllocationScope<AnyThread> noAllocationScope;
2467 2470
2468 enterGC(); 2471 enterGC();
2469 prepareForGC(); 2472 preGC();
2470 2473
2471 Heap::resetMarkedObjectSize(); 2474 Heap::resetMarkedObjectSize();
2472 Heap::resetAllocatedObjectSize(); 2475 Heap::resetAllocatedObjectSize();
2473 2476
2474 // 1. trace persistent roots. 2477 // 1. trace persistent roots.
2475 ThreadState::visitPersistentRoots(s_markingVisitor); 2478 ThreadState::visitPersistentRoots(s_markingVisitor);
2476 2479
2477 // 2. trace objects reachable from the persistent roots including ephemerons. 2480 // 2. trace objects reachable from the persistent roots including ephemerons.
2478 processMarkingStack<GlobalMarking>(); 2481 processMarkingStack<GlobalMarking>();
2479 2482
2480 // 3. trace objects reachable from the stack. We do this independent of the 2483 // 3. trace objects reachable from the stack. We do this independent of the
2481 // given stackState since other threads might have a different stack state. 2484 // given stackState since other threads might have a different stack state.
2482 ThreadState::visitStackRoots(s_markingVisitor); 2485 ThreadState::visitStackRoots(s_markingVisitor);
2483 2486
2484 // 4. trace objects reachable from the stack "roots" including ephemerons. 2487 // 4. trace objects reachable from the stack "roots" including ephemerons.
2485 // Only do the processing if we found a pointer to an object on one of the 2488 // Only do the processing if we found a pointer to an object on one of the
2486 // thread stacks. 2489 // thread stacks.
2487 if (lastGCWasConservative()) { 2490 if (lastGCWasConservative()) {
2488 processMarkingStack<GlobalMarking>(); 2491 processMarkingStack<GlobalMarking>();
2489 } 2492 }
2490 2493
2491 postMarkingProcessing(); 2494 postMarkingProcessing();
2492 globalWeakProcessing(); 2495 globalWeakProcessing();
2493 2496
2494 // Now we can delete all orphaned pages because there are no dangling 2497 // Now we can delete all orphaned pages because there are no dangling
2495 // pointers to the orphaned pages. (If we have such dangling pointers, 2498 // pointers to the orphaned pages. (If we have such dangling pointers,
2496 // we should have crashed during marking before getting here.) 2499 // we should have crashed during marking before getting here.)
2497 orphanedPagePool()->decommitOrphanedPages(); 2500 orphanedPagePool()->decommitOrphanedPages();
2498 2501
2502 postGC();
2499 leaveGC(); 2503 leaveGC();
2500 2504
2501 #if ENABLE(GC_PROFILE_MARKING) 2505 #if ENABLE(GC_PROFILE_MARKING)
2502 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats(); 2506 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
2503 #endif 2507 #endif
2504 2508
2505 if (Platform::current()) { 2509 if (Platform::current()) {
2506 Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); 2510 Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
2507 Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap::allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50); 2511 Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap::allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50);
2508 Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", Heap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50); 2512 Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", Heap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50);
2509 } 2513 }
2510 2514
2511 if (state->isMainThread()) 2515 if (state->isMainThread())
2512 ScriptForbiddenScope::exit(); 2516 ScriptForbiddenScope::exit();
2513 } 2517 }
2514 2518
2515 void Heap::collectGarbageForTerminatingThread(ThreadState* state) 2519 void Heap::collectGarbageForTerminatingThread(ThreadState* state)
2516 { 2520 {
2517 // We explicitly do not enter a safepoint while doing thread specific 2521 // We explicitly do not enter a safepoint while doing thread specific
2518 // garbage collection since we don't want to allow a global GC at the 2522 // garbage collection since we don't want to allow a global GC at the
2519 // same time as a thread local GC. 2523 // same time as a thread local GC.
2520 2524
2521 { 2525 {
2522 NoAllocationScope<AnyThread> noAllocationScope; 2526 NoAllocationScope<AnyThread> noAllocationScope;
2523 2527
2524 Heap::enterGC(); 2528 enterGC();
2525 state->enterGC(); 2529 state->preGC();
2526 state->prepareForGC();
2527 2530
2528 // 1. trace the thread local persistent roots. For thread local GCs we 2531 // 1. trace the thread local persistent roots. For thread local GCs we
2529 // don't trace the stack (ie. no conservative scanning) since this is 2532 // don't trace the stack (ie. no conservative scanning) since this is
2530 // only called during thread shutdown where there should be no objects 2533 // only called during thread shutdown where there should be no objects
2531 // on the stack. 2534 // on the stack.
2532 // We also assume that orphaned pages have no objects reachable from 2535 // We also assume that orphaned pages have no objects reachable from
2533 // persistent handles on other threads or CrossThreadPersistents. The 2536 // persistent handles on other threads or CrossThreadPersistents. The
2534 // only cases where this could happen is if a subsequent conservative 2537 // only cases where this could happen is if a subsequent conservative
2535 // global GC finds a "pointer" on the stack or due to a programming 2538 // global GC finds a "pointer" on the stack or due to a programming
2536 // error where an object has a dangling cross-thread pointer to an 2539 // error where an object has a dangling cross-thread pointer to an
2537 // object on this heap. 2540 // object on this heap.
2538 state->visitPersistents(s_markingVisitor); 2541 state->visitPersistents(s_markingVisitor);
2539 2542
2540 // 2. trace objects reachable from the thread's persistent roots 2543 // 2. trace objects reachable from the thread's persistent roots
2541 // including ephemerons. 2544 // including ephemerons.
2542 processMarkingStack<ThreadLocalMarking>(); 2545 processMarkingStack<ThreadLocalMarking>();
2543 2546
2544 postMarkingProcessing(); 2547 postMarkingProcessing();
2545 globalWeakProcessing(); 2548 globalWeakProcessing();
2546 2549
2547 state->leaveGC(); 2550 state->postGC();
2548 Heap::leaveGC(); 2551 leaveGC();
2549 } 2552 }
2550 state->performPendingSweep(); 2553 state->performPendingSweep();
2551 } 2554 }
2552 2555
2553 template<CallbackInvocationMode Mode> 2556 template<CallbackInvocationMode Mode>
2554 void Heap::processMarkingStack() 2557 void Heap::processMarkingStack()
2555 { 2558 {
2556 // Ephemeron fixed point loop. 2559 // Ephemeron fixed point loop.
2557 do { 2560 do {
2558 { 2561 {
(...skipping 265 matching lines...) Expand 10 before | Expand all | Expand 10 after
2824 bool Heap::s_lastGCWasConservative = false; 2827 bool Heap::s_lastGCWasConservative = false;
2825 bool Heap::s_inGC = false; 2828 bool Heap::s_inGC = false;
2826 FreePagePool* Heap::s_freePagePool; 2829 FreePagePool* Heap::s_freePagePool;
2827 OrphanedPagePool* Heap::s_orphanedPagePool; 2830 OrphanedPagePool* Heap::s_orphanedPagePool;
2828 Heap::RegionTree* Heap::s_regionTree = 0; 2831 Heap::RegionTree* Heap::s_regionTree = 0;
2829 size_t Heap::s_allocatedObjectSize = 0; 2832 size_t Heap::s_allocatedObjectSize = 0;
2830 size_t Heap::s_allocatedSpace = 0; 2833 size_t Heap::s_allocatedSpace = 0;
2831 size_t Heap::s_markedObjectSize = 0; 2834 size_t Heap::s_markedObjectSize = 0;
2832 2835
2833 } // namespace blink 2836 } // namespace blink
OLDNEW
« no previous file with comments | « Source/platform/heap/Heap.h ('k') | Source/platform/heap/HeapTest.cpp » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698