OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 274 matching lines...)
285 // makeConsistentForMutator() drops marks from marked objects and rebuilds | 285 // makeConsistentForMutator() drops marks from marked objects and rebuilds |
286 // free lists. This is called after taking a snapshot and before resuming | 286 // free lists. This is called after taking a snapshot and before resuming |
287 // the execution of mutators. | 287 // the execution of mutators. |
288 void makeConsistentForMutator(); | 288 void makeConsistentForMutator(); |
289 | 289 |
290 // Support for disallowing allocation. Mainly used for sanity | 290 // Support for disallowing allocation. Mainly used for sanity |
291 // check asserts. | 291 // check asserts. |
292 bool isAllocationAllowed() const { return !isAtSafePoint() && !m_noAllocationCount; } | 292 bool isAllocationAllowed() const { return !isAtSafePoint() && !m_noAllocationCount; } |
293 void enterNoAllocationScope() { m_noAllocationCount++; } | 293 void enterNoAllocationScope() { m_noAllocationCount++; } |
294 void leaveNoAllocationScope() { m_noAllocationCount--; } | 294 void leaveNoAllocationScope() { m_noAllocationCount--; } |
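For illustration only (not part of this change): callers would typically pair enterNoAllocationScope() and leaveNoAllocationScope() through an RAII guard so the counter cannot leak on an early return. The guard class below is a hypothetical sketch; only the two ThreadState calls come from the header above.

// Hypothetical RAII guard; only enterNoAllocationScope()/leaveNoAllocationScope()
// are taken from the header above.
class ScopedNoAllocationSketch {
public:
    explicit ScopedNoAllocationSketch(ThreadState* state)
        : m_state(state)
    {
        m_state->enterNoAllocationScope();
    }
    ~ScopedNoAllocationSketch()
    {
        // Once the counter drops back to zero, isAllocationAllowed() can be true again.
        m_state->leaveNoAllocationScope();
    }
private:
    ThreadState* m_state;
};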
295 bool isGCForbidden() const { return m_gcForbiddenCount; } | 295 |
| 296 // By entering a gc-forbidden scope, conservative GCs will not |
| 297 // be allowed while handling an out-of-line allocation request. |
| 298 // Intended to be used when constructing subclasses of GC mixins, where |
| 299 // the object being constructed cannot be safely traced & marked |
| 300 // fully should a GC be allowed while its subclasses are being |
| 301 // constructed. |
| 302 bool isGCForbidden() const { return m_gcForbiddenCount || isConstructingGCMixin(); } |
296 void enterGCForbiddenScope() { m_gcForbiddenCount++; } | 303 void enterGCForbiddenScope() { m_gcForbiddenCount++; } |
297 void leaveGCForbiddenScope() | 304 void leaveGCForbiddenScope() |
298 { | 305 { |
299 ASSERT(m_gcForbiddenCount > 0); | 306 ASSERT(m_gcForbiddenCount > 0); |
300 m_gcForbiddenCount--; | 307 m_gcForbiddenCount--; |
301 } | 308 } |
302 bool sweepForbidden() const { return m_sweepForbidden; } | 309 bool sweepForbidden() const { return m_sweepForbidden; } |
303 | 310 |
| 311 bool isConstructingGCMixin() const { return m_gcMixinMarker; } |
| 312 void startConstructingGCMixin(GarbageCollectedMixinConstructorMarker* gcMixinMarker) |
| 313 { |
| 314 ASSERT(checkThread()); |
| 315 if (!m_gcMixinMarker) { |
| 316 m_gcMixinMarker = gcMixinMarker; |
| 317 } |
| 318 } |
| 319 void finishConstructingGCMixin(GarbageCollectedMixinConstructorMarker* gcMixinMarker) |
| 320 { |
| 321 ASSERT(checkThread()); |
| 322 if (m_gcMixinMarker == gcMixinMarker) { |
| 323 m_gcMixinMarker = nullptr; |
| 324 } |
| 325 } |
| 326 |
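To make the intended pairing of the new methods concrete, here is a hypothetical sketch (the real marker handling lives in the GC mixin construction machinery, not in this header); only startConstructingGCMixin(), finishConstructingGCMixin(), isGCForbidden() and ThreadState::current() are taken from the code above.

// Hypothetical sketch of driving the mixin-construction scope.
void constructMixinSubclassSketch(GarbageCollectedMixinConstructorMarker* marker)
{
    ThreadState* state = ThreadState::current();
    state->startConstructingGCMixin(marker);   // isGCForbidden() now returns true on this thread.
    // ... run the mixin subclass constructors; a conservative GC cannot be triggered here ...
    state->finishConstructingGCMixin(marker);  // Only the marker that opened the scope closes it.
}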
304 void flushHeapDoesNotContainCacheIfNeeded(); | 327 void flushHeapDoesNotContainCacheIfNeeded(); |
305 | 328 |
306 // Safepoint related functionality. | 329 // Safepoint related functionality. |
307 // | 330 // |
308 // When a thread attempts to perform GC it needs to stop all other threads | 331 // When a thread attempts to perform GC it needs to stop all other threads |
309 // that use the heap or at least guarantee that they will not touch any | 332 // that use the heap or at least guarantee that they will not touch any |
310 // heap allocated object until GC is complete. | 333 // heap allocated object until GC is complete. |
311 // | 334 // |
312 // We say that a thread is at a safepoint if this thread is guaranteed to | 335 // We say that a thread is at a safepoint if this thread is guaranteed to |
313 // not touch any heap allocated object or any heap related functionality until | 336 // not touch any heap allocated object or any heap related functionality until |
(...skipping 109 matching lines...)
423 } | 446 } |
424 | 447 |
425 void shouldFlushHeapDoesNotContainCache() { m_shouldFlushHeapDoesNotContainCache = true; } | 448 void shouldFlushHeapDoesNotContainCache() { m_shouldFlushHeapDoesNotContainCache = true; } |
426 | 449 |
427 void registerTraceDOMWrappers(v8::Isolate* isolate, void (*traceDOMWrappers)(v8::Isolate*, Visitor*)) | 450 void registerTraceDOMWrappers(v8::Isolate* isolate, void (*traceDOMWrappers)(v8::Isolate*, Visitor*)) |
428 { | 451 { |
429 m_isolate = isolate; | 452 m_isolate = isolate; |
430 m_traceDOMWrappers = traceDOMWrappers; | 453 m_traceDOMWrappers = traceDOMWrappers; |
431 } | 454 } |
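As a usage illustration, a caller of the hook above might register a trace callback as sketched below; traceWrappersForIsolate() and registerWrapperTracingSketch() are hypothetical names, only registerTraceDOMWrappers() and ThreadState::current() come from this header.

// Hypothetical callback matching the void (*)(v8::Isolate*, Visitor*) signature above.
static void traceWrappersForIsolate(v8::Isolate* isolate, Visitor* visitor)
{
    // Walk the isolate's DOM wrappers and mark them through |visitor|.
}

void registerWrapperTracingSketch(v8::Isolate* isolate)
{
    ThreadState::current()->registerTraceDOMWrappers(isolate, &traceWrappersForIsolate);
}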
432 | 455 |
433 // By entering a gc-forbidden scope, conservative GCs will not | |
434 // be allowed while handling an out-of-line allocation request. | |
435 // Intended to be used when constructing subclasses of GC mixins, where |
436 // the object being constructed cannot be safely traced & marked | |
437 // fully should a GC be allowed while its subclasses are being | |
438 // constructed. | |
439 void enterGCForbiddenScopeIfNeeded(GarbageCollectedMixinConstructorMarker* gcMixinMarker) |
440 { | |
441 ASSERT(checkThread()); | |
442 if (!m_gcMixinMarker) { | |
443 enterGCForbiddenScope(); | |
444 m_gcMixinMarker = gcMixinMarker; | |
445 } | |
446 } | |
447 void leaveGCForbiddenScopeIfNeeded(GarbageCollectedMixinConstructorMarker* gcMixinMarker) |
448 { | |
449 ASSERT(checkThread()); | |
450 if (m_gcMixinMarker == gcMixinMarker) { | |
451 leaveGCForbiddenScope(); | |
452 m_gcMixinMarker = nullptr; | |
453 } | |
454 } | |
455 | |
456 // vectorBackingHeap() returns a heap that the vector allocation should use. | 456 // vectorBackingHeap() returns a heap that the vector allocation should use. |
457 // We have four vector heaps and want to choose the best heap here. | 457 // We have four vector heaps and want to choose the best heap here. |
458 // | 458 // |
459 // The goal is to improve the succession rate where expand and | 459 // The goal is to improve the succession rate where expand and |
460 // promptlyFree happen at an allocation point. This is a key for reusing | 460 // promptlyFree happen at an allocation point. This is a key for reusing |
461 // the same memory as much as possible and thus improves performance. | 461 // the same memory as much as possible and thus improves performance. |
462 // To achieve the goal, we use the following heuristics: | 462 // To achieve the goal, we use the following heuristics: |
463 // | 463 // |
464 // - A vector that has been expanded recently is likely to be expanded | 464 // - A vector that has been expanded recently is likely to be expanded |
465 // again soon. | 465 // again soon. |
(...skipping 207 matching lines...)
673 }; | 673 }; |
674 | 674 |
675 template<> class ThreadStateFor<AnyThread> { | 675 template<> class ThreadStateFor<AnyThread> { |
676 public: | 676 public: |
677 static ThreadState* state() { return ThreadState::current(); } | 677 static ThreadState* state() { return ThreadState::current(); } |
678 }; | 678 }; |
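Usage note (illustrative only): with AnyThread, ThreadStateFor simply forwards to ThreadState::current(), so templated callers can resolve the current thread's state uniformly. The snippet below is a hypothetical caller; isAllocationAllowed() is declared earlier in this header.

// Hypothetical caller sketch.
ThreadState* state = ThreadStateFor<AnyThread>::state();
if (state->isAllocationAllowed()) {
    // ... safe to allocate on the managed (Oilpan) heap from here ...
}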
679 | 679 |
680 } // namespace blink | 680 } // namespace blink |
681 | 681 |
682 #endif // ThreadState_h | 682 #endif // ThreadState_h |