OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 429 matching lines...) |
440 | 440 |
441 | 441 |
442 void ThreadState::detach() | 442 void ThreadState::detach() |
443 { | 443 { |
444 ThreadState* state = current(); | 444 ThreadState* state = current(); |
445 state->cleanup(); | 445 state->cleanup(); |
446 delete state; | 446 delete state; |
447 shutdownHeapIfNecessary(); | 447 shutdownHeapIfNecessary(); |
448 } | 448 } |
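For context on the lifecycle this relies on: a thread that allocates on the managed heap is expected to call ThreadState::attach() when it starts and detach() before it exits, so cleanup() runs and shutdownHeapIfNecessary() can tear the heap down once the last thread leaves. A minimal sketch, assuming the matching attach() entry point from this class; workerThreadMain is a hypothetical caller:

    void workerThreadMain()
    {
        ThreadState::attach();   // create and register this thread's ThreadState
        // ... allocate and use garbage-collected objects ...
        ThreadState::detach();   // runs cleanup(), may shut the heap down
    }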
449 | 449 |
450 void ThreadState::visitRoots(Visitor* visitor) | 450 void ThreadState::visitPersistentRoots(Visitor* visitor) |
451 { | 451 { |
452 { | 452 { |
453 // All threads are at safepoints so this is not strictly necessary. | 453 // All threads are at safepoints so this is not strictly necessary. |
454 // However we acquire the mutex to make mutation and traversal of this | 454 // However we acquire the mutex to make mutation and traversal of this |
455 // list symmetrical. | 455 // list symmetrical. |
456 MutexLocker locker(globalRootsMutex()); | 456 MutexLocker locker(globalRootsMutex()); |
457 globalRoots()->trace(visitor); | 457 globalRoots()->trace(visitor); |
458 } | 458 } |
459 | 459 |
460 AttachedThreadStateSet& threads = attachedThreads(); | 460 AttachedThreadStateSet& threads = attachedThreads(); |
461 for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) | 461 for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
462 (*it)->trace(visitor); | 462 (*it)->visitPersistents(visitor); |
463 } | 463 } |
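What this persistent root set contains, roughly: each Persistent&lt;T&gt; handle registers with its owning ThreadState, and cross-thread handles live on the globalRoots() list, so tracing both marks their targets live regardless of any stack contents. A hedged one-line illustration, with SomeNode standing in for any garbage-collected type:

    // Kept alive across GCs purely through the persistent list traced above;
    // no stack slot or member field needs to reference it.
    Persistent<SomeNode> node = SomeNode::create();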
464 | 464 |
465 void ThreadState::visitLocalRoots(Visitor* visitor) | 465 void ThreadState::visitStackRoots(Visitor* visitor) |
466 { | 466 { |
467 // We assume that orphaned pages have no objects reachable from persistent | 467 AttachedThreadStateSet& threads = attachedThreads();
468 // handles on other threads or CrossThreadPersistents. The only cases where | 468 for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
469 // this could happen are if a global conservative GC finds a "pointer" on | 469 (*it)->visitStack(visitor);
470 // the stack or if a programming error leaves an object with a dangling |
471 // cross-thread pointer to an object on this heap. |
472 m_persistents->trace(visitor); |
473 } | 470 }
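A sketch of how a global GC pass might drive the two split entry points; traceAllRoots is an illustrative name, not part of this patch. visitStackRoots() is safe to call unconditionally because the reworked visitStack() below returns early for threads parked with NoHeapPointersOnStack:

    static void traceAllRoots(Visitor* visitor)  // hypothetical caller
    {
        ThreadState::visitPersistentRoots(visitor); // global roots + per-thread persistents
        ThreadState::visitStackRoots(visitor);      // conservative scan of each thread's stack
    }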
474 | 471 |
475 NO_SANITIZE_ADDRESS | 472 NO_SANITIZE_ADDRESS |
476 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr) | 473 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr) |
477 { | 474 { |
478 #if defined(ADDRESS_SANITIZER) | 475 #if defined(ADDRESS_SANITIZER) |
479 Address* start = reinterpret_cast<Address*>(m_startOfStack); | 476 Address* start = reinterpret_cast<Address*>(m_startOfStack); |
480 Address* end = reinterpret_cast<Address*>(m_endOfStack); | 477 Address* end = reinterpret_cast<Address*>(m_endOfStack); |
481 Address* fakeFrameStart = 0; | 478 Address* fakeFrameStart = 0; |
482 Address* fakeFrameEnd = 0; | 479 Address* fakeFrameEnd = 0; |
(...skipping 13 matching lines...) |
496 for (Address* p = fakeFrameStart; p < fakeFrameEnd; p++) | 493 for (Address* p = fakeFrameStart; p < fakeFrameEnd; p++) |
497 Heap::checkAndMarkPointer(visitor, *p); | 494 Heap::checkAndMarkPointer(visitor, *p); |
498 } | 495 } |
499 } | 496 } |
500 #endif | 497 #endif |
501 } | 498 } |
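The skipped lines presumably resolve ptr against ASan's fake stack; for reference, the public interface this builds on looks like the sketch below (a standalone illustration, not the patched code). A pointer into a fake frame must have that frame's range scanned like real stack memory, since ASan moves frames to heap-allocated storage under detect_stack_use_after_return:

    #include <sanitizer/asan_interface.h>

    // Returns true and reports [*frameBegin, *frameEnd) if 'candidate'
    // points into an ASan fake frame of the current thread.
    static bool findFakeFrame(void* candidate, void** frameBegin, void** frameEnd)
    {
        void* fakeStack = __asan_get_current_fake_stack();
        if (!fakeStack)
            return false;
        return __asan_addr_is_in_fake_stack(fakeStack, candidate,
                                            frameBegin, frameEnd) != 0;
    }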
502 | 499 |
503 NO_SANITIZE_ADDRESS | 500 NO_SANITIZE_ADDRESS |
504 void ThreadState::visitStack(Visitor* visitor) | 501 void ThreadState::visitStack(Visitor* visitor) |
505 { | 502 { |
| 503 if (m_stackState == NoHeapPointersOnStack) |
| 504 return; |
| 505 |
506 Address* start = reinterpret_cast<Address*>(m_startOfStack); | 506 Address* start = reinterpret_cast<Address*>(m_startOfStack); |
507 // If there is a safepoint scope marker we should stop the stack | 507 // If there is a safepoint scope marker we should stop the stack |
508 // scanning there to not touch active parts of the stack. Anything | 508 // scanning there to not touch active parts of the stack. Anything |
509 // interesting beyond that point is in the safepoint stack copy. | 509 // interesting beyond that point is in the safepoint stack copy. |
510 // If there is no scope marker the thread is blocked and we should | 510 // If there is no scope marker the thread is blocked and we should |
511 // scan all the way to the recorded end stack pointer. | 511 // scan all the way to the recorded end stack pointer. |
512 Address* end = reinterpret_cast<Address*>(m_endOfStack); | 512 Address* end = reinterpret_cast<Address*>(m_endOfStack); |
513 Address* safePointScopeMarker = reinterpret_cast<Address*>(m_safePointScopeMarker); | 513 Address* safePointScopeMarker = reinterpret_cast<Address*>(m_safePointScopeMarker);
514 Address* current = safePointScopeMarker ? safePointScopeMarker : end; | 514 Address* current = safePointScopeMarker ? safePointScopeMarker : end; |
515 | 515 |
(...skipping 24 matching lines...) |
540 Heap::checkAndMarkPointer(visitor, ptr); | 540 Heap::checkAndMarkPointer(visitor, ptr); |
541 visitAsanFakeStackForPointer(visitor, ptr); | 541 visitAsanFakeStackForPointer(visitor, ptr); |
542 } | 542 } |
543 } | 543 } |
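The 24 skipped lines hold the scan itself; as an illustration of the walk the comments describe (not the elided code verbatim), the visitor advances word by word from the marker, or from the recorded end, toward the start of the downward-growing stack, treating every word as a potential pointer:

    for (; current < start; ++current) {
        Address ptr = *current;
        Heap::checkAndMarkPointer(visitor, ptr);    // conservative: mark if it looks like a heap pointer
        visitAsanFakeStackForPointer(visitor, ptr); // also chase ASan fake frames
    }

The same two calls are then applied to each word of the safepoint stack copy, whose tail is the loop visible above.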
544 | 544 |
545 void ThreadState::visitPersistents(Visitor* visitor) | 545 void ThreadState::visitPersistents(Visitor* visitor) |
546 { | 546 { |
547 m_persistents->trace(visitor); | 547 m_persistents->trace(visitor); |
548 } | 548 } |
549 | 549 |
550 void ThreadState::trace(Visitor* visitor) | |
551 { | |
552 if (m_stackState == HeapPointersOnStack) | |
553 visitStack(visitor); | |
554 visitPersistents(visitor); | |
555 } | |
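Reviewer note on equivalence: the deleted trace() was the only place that combined the two root sets per thread. After this patch the HeapPointersOnStack check lives inside visitStack() itself (new lines 503-504), so the same set of pointers is still traced; the split merely lets callers visit persistent roots and stack roots independently:

    // Before: if (m_stackState == HeapPointersOnStack) visitStack(v); visitPersistents(v);
    // After:  visitStack(v) returns early itself; the two halves are called separately.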
556 | |
557 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address) | 550 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address) |
558 { | 551 { |
559 // If the thread is terminating, ignore conservative pointers. | 552 // If the thread is terminating, ignore conservative pointers.
560 if (m_isTerminating) | 553 if (m_isTerminating) |
561 return false; | 554 return false; |
562 | 555 |
563 // This checks for normal pages and for large objects which span the extent | 556 // This checks for normal pages and for large objects which span the extent |
564 // of several normal pages. | 557 // of several normal pages. |
565 BaseHeapPage* page = heapPageFromAddress(address); | 558 BaseHeapPage* page = heapPageFromAddress(address); |
566 if (page) { | 559 if (page) { |
(...skipping 415 matching lines...) |
982 threadAttachMutex().unlock(); | 975 threadAttachMutex().unlock(); |
983 return gcInfo; | 976 return gcInfo; |
984 } | 977 } |
985 } | 978 } |
986 if (needLockForIteration) | 979 if (needLockForIteration) |
987 threadAttachMutex().unlock(); | 980 threadAttachMutex().unlock(); |
988 return 0; | 981 return 0; |
989 } | 982 } |
990 #endif | 983 #endif |
991 } | 984 } |