OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 428 matching lines...)
439 | 439 |
440 | 440 |
441 void ThreadState::detach() | 441 void ThreadState::detach() |
442 { | 442 { |
443 ThreadState* state = current(); | 443 ThreadState* state = current(); |
444 state->cleanup(); | 444 state->cleanup(); |
445 delete state; | 445 delete state; |
446 shutdownHeapIfNecessary(); | 446 shutdownHeapIfNecessary(); |
447 } | 447 } |
448 | 448 |
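A standalone sketch (not Blink code) of the lifecycle that detach() closes out: a thread registers a ThreadState on entry via the matching attach() and tears it down on exit. All names below are illustrative stand-ins.

#include <cassert>

class ThreadStateSketch {
public:
    // Called when a thread starts using the garbage-collected heap.
    static void attach() { assert(!s_current); s_current = new ThreadStateSketch(); }

    // Mirrors detach() above: cleanup, destroy, then heap shutdown.
    static void detach()
    {
        ThreadStateSketch* state = current();
        state->cleanup();    // release per-thread GC bookkeeping
        delete state;
        s_current = nullptr; // stands in for shutdownHeapIfNecessary()
    }

    static ThreadStateSketch* current() { return s_current; }

private:
    void cleanup() { }
    static thread_local ThreadStateSketch* s_current;
};

thread_local ThreadStateSketch* ThreadStateSketch::s_current = nullptr;
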
449 void ThreadState::visitPersistentRoots(Visitor* visitor) | 449 void ThreadState::visitRoots(Visitor* visitor) |
450 { | 450 { |
451 { | 451 { |
452 // All threads are at safepoints so this is not strictly necessary. | 452 // All threads are at safepoints so this is not strictly necessary. |
453 // However, we acquire the mutex to make mutation and traversal of this | 453 // However, we acquire the mutex to make mutation and traversal of this |
454 // list symmetrical. | 454 // list symmetrical. |
455 MutexLocker locker(globalRootsMutex()); | 455 MutexLocker locker(globalRootsMutex()); |
456 globalRoots()->trace(visitor); | 456 globalRoots()->trace(visitor); |
457 } | 457 } |
458 | 458 |
459 AttachedThreadStateSet& threads = attachedThreads(); | 459 AttachedThreadStateSet& threads = attachedThreads(); |
460 for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) | 460 for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) |
461 (*it)->visitPersistents(visitor); | 461 (*it)->trace(visitor); |
462 } | 462 } |
463 | 463 |
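A minimal compiling sketch (not Blink code) of the visitRoots() pattern above: global cross-thread roots are traced under the same mutex that guards their mutation, then each attached thread contributes its own roots via trace(). Visitor and the container types are stand-ins.

#include <mutex>
#include <set>

class Visitor { };

class ThreadStateModel {
public:
    void trace(Visitor*) { /* stack roots (if any) plus persistent handles */ }

    static void visitRoots(Visitor* visitor)
    {
        {
            // Even with every thread parked at a safepoint, taking the lock
            // keeps traversal symmetrical with mutation of the root list.
            std::lock_guard<std::mutex> locker(globalRootsMutex());
            // globalRoots()->trace(visitor) would run here.
        }
        for (ThreadStateModel* state : attachedThreads())
            state->trace(visitor);
    }

private:
    static std::mutex& globalRootsMutex()
    {
        static std::mutex mutex;
        return mutex;
    }

    static std::set<ThreadStateModel*>& attachedThreads()
    {
        static std::set<ThreadStateModel*> threads;
        return threads;
    }
};
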
464 void ThreadState::visitStackRoots(Visitor* visitor) | 464 void ThreadState::visitLocalRoots(Visitor* visitor) |
465 { | 465 { |
466 AttachedThreadStateSet& threads = attachedThreads(); | 466 // We assume that orphaned pages have no objects reachable from persistent |
467 for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) | 467 // handles on other threads or CrossThreadPersistents. The only cases |
468 (*it)->visitStack(visitor); | 468 // where this could happen are a global conservative GC finding a |
| 469 // "pointer" on the stack, or a programming error where an object has a |
| 470 // dangling cross-thread pointer to an object on this heap. |
| 471 m_persistents->trace(visitor); |
469 } | 472 } |
470 | 473 |
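Presumably visitLocalRoots() serves a thread-local collection, where only the current thread's persistent handles act as roots; the orphaned-page assumption in the comment is what permits ignoring reachability from other threads during such a collection.
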
471 NO_SANITIZE_ADDRESS | 474 NO_SANITIZE_ADDRESS |
472 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr) | 475 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr) |
473 { | 476 { |
474 #if defined(ADDRESS_SANITIZER) | 477 #if defined(ADDRESS_SANITIZER) |
475 Address* start = reinterpret_cast<Address*>(m_startOfStack); | 478 Address* start = reinterpret_cast<Address*>(m_startOfStack); |
476 Address* end = reinterpret_cast<Address*>(m_endOfStack); | 479 Address* end = reinterpret_cast<Address*>(m_endOfStack); |
477 Address* fakeFrameStart = 0; | 480 Address* fakeFrameStart = 0; |
478 Address* fakeFrameEnd = 0; | 481 Address* fakeFrameEnd = 0; |
(...skipping 13 matching lines...)
492 for (Address* p = fakeFrameStart; p < fakeFrameEnd; p++) | 495 for (Address* p = fakeFrameStart; p < fakeFrameEnd; p++) |
493 Heap::checkAndMarkPointer(visitor, *p); | 496 Heap::checkAndMarkPointer(visitor, *p); |
494 } | 497 } |
495 } | 498 } |
496 #endif | 499 #endif |
497 } | 500 } |
498 | 501 |
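The fake-stack probe this function performs rests on ASan's public interface from <sanitizer/asan_interface.h>. A hedged, self-contained sketch of that interface, with scanWord() as a hypothetical stand-in for Heap::checkAndMarkPointer():

#if defined(ADDRESS_SANITIZER)
#include <sanitizer/asan_interface.h>

// Hypothetical stand-in for Heap::checkAndMarkPointer().
static void scanWord(void*) { }

// Ask ASan whether 'candidate' points into a fake frame kept alive for
// use-after-return detection; if so, [beg, end) delimits that frame and
// its words are scanned like ordinary stack slots.
static void scanIfFakeFrame(void* fakeStack, void* candidate)
{
    void* beg = nullptr;
    void* end = nullptr;
    if (!__asan_addr_is_in_fake_stack(fakeStack, candidate, &beg, &end))
        return; // not a fake-stack address; nothing extra to scan
    for (void** p = static_cast<void**>(beg); p < static_cast<void**>(end); ++p)
        scanWord(*p);
}
#endif
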
499 NO_SANITIZE_ADDRESS | 502 NO_SANITIZE_ADDRESS |
500 void ThreadState::visitStack(Visitor* visitor) | 503 void ThreadState::visitStack(Visitor* visitor) |
501 { | 504 { |
502 if (m_stackState == NoHeapPointersOnStack) | |
503 return; | |
504 | |
505 Address* start = reinterpret_cast<Address*>(m_startOfStack); | 505 Address* start = reinterpret_cast<Address*>(m_startOfStack); |
506 // If there is a safepoint scope marker we should stop the stack | 506 // If there is a safepoint scope marker we should stop the stack |
507 // scanning there to not touch active parts of the stack. Anything | 507 // scanning there to not touch active parts of the stack. Anything |
508 // interesting beyond that point is in the safepoint stack copy. | 508 // interesting beyond that point is in the safepoint stack copy. |
509 // If there is no scope marker the thread is blocked and we should | 509 // If there is no scope marker the thread is blocked and we should |
510 // scan all the way to the recorded end stack pointer. | 510 // scan all the way to the recorded end stack pointer. |
511 Address* end = reinterpret_cast<Address*>(m_endOfStack); | 511 Address* end = reinterpret_cast<Address*>(m_endOfStack); |
512 Address* safePointScopeMarker = reinterpret_cast<Address*>(m_safePointScopeMarker); | 512 Address* safePointScopeMarker = reinterpret_cast<Address*>(m_safePointScopeMarker); |
513 Address* current = safePointScopeMarker ? safePointScopeMarker : end; | 513 Address* current = safePointScopeMarker ? safePointScopeMarker : end; |
514 | 514 |
(...skipping 24 matching lines...)
539 Heap::checkAndMarkPointer(visitor, ptr); | 539 Heap::checkAndMarkPointer(visitor, ptr); |
540 visitAsanFakeStackForPointer(visitor, ptr); | 540 visitAsanFakeStackForPointer(visitor, ptr); |
541 } | 541 } |
542 } | 542 } |
543 | 543 |
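A standalone model (not the elided Blink loop) of the range logic the comment describes: every word-aligned slot from the lower bound, the scope marker when one was recorded or the saved end of stack otherwise, up to the start-of-stack address captured at attach time, is treated as a potential heap pointer.

using Address = char*;

// Stand-in for Heap::checkAndMarkPointer(); the real one consults the
// heap's page tables before marking.
static void checkAndMarkPointer(Address) { }

// Stacks grow down on the supported platforms, so 'start' (recorded at
// attach time) is the upper bound and 'current' (scope marker or saved
// end of stack) is where the scan begins.
static void scanStackConservatively(Address* start, Address* current)
{
    for (Address* slot = current; slot < start; ++slot)
        checkAndMarkPointer(*slot);
}
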
544 void ThreadState::visitPersistents(Visitor* visitor) | 544 void ThreadState::visitPersistents(Visitor* visitor) |
545 { | 545 { |
546 m_persistents->trace(visitor); | 546 m_persistents->trace(visitor); |
547 } | 547 } |
548 | 548 |
| 549 void ThreadState::trace(Visitor* visitor) |
| 550 { |
| 551 if (m_stackState == HeapPointersOnStack) |
| 552 visitStack(visitor); |
| 553 visitPersistents(visitor); |
| 554 } |
| 555 |
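With this split, the NoHeapPointersOnStack guard lives in trace() rather than in visitStack(): a thread whose safepoint state promises a clean stack contributes only its persistent handles, while HeapPointersOnStack threads get the conservative stack scan first.
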
549 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address) | 556 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address) |
550 { | 557 { |
551 // If the thread is terminating, ignore conservative pointers. | 558 // If the thread is terminating, ignore conservative pointers. |
552 if (m_isTerminating) | 559 if (m_isTerminating) |
553 return false; | 560 return false; |
554 | 561 |
555 // This checks for normal pages and for large objects which span the extent | 562 // This checks for normal pages and for large objects which span the extent |
556 // of several normal pages. | 563 // of several normal pages. |
557 BaseHeapPage* page = heapPageFromAddress(address); | 564 BaseHeapPage* page = heapPageFromAddress(address); |
558 if (page) { | 565 if (page) { |
(...skipping 404 matching lines...)
963 threadAttachMutex().unlock(); | 970 threadAttachMutex().unlock(); |
964 return gcInfo; | 971 return gcInfo; |
965 } | 972 } |
966 } | 973 } |
967 if (needLockForIteration) | 974 if (needLockForIteration) |
968 threadAttachMutex().unlock(); | 975 threadAttachMutex().unlock(); |
969 return 0; | 976 return 0; |
970 } | 977 } |
971 #endif | 978 #endif |
972 } | 979 } |