| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 180 matching lines...) |
| 191 class TestGCScope { | 191 class TestGCScope { |
| 192 public: | 192 public: |
| 193 explicit TestGCScope(ThreadState::StackState state) | 193 explicit TestGCScope(ThreadState::StackState state) |
| 194 : m_state(ThreadState::current()) | 194 : m_state(ThreadState::current()) |
| 195 , m_safePointScope(state) | 195 , m_safePointScope(state) |
| 196 , m_parkedAllThreads(false) | 196 , m_parkedAllThreads(false) |
| 197 { | 197 { |
| 198 m_state->checkThread(); | 198 m_state->checkThread(); |
| 199 if (LIKELY(ThreadState::stopThreads())) { | 199 if (LIKELY(ThreadState::stopThreads())) { |
| 200 Heap::enterGC(); | 200 Heap::enterGC(); |
| 201 m_state->enterGC(); | |
| 202 m_parkedAllThreads = true; | 201 m_parkedAllThreads = true; |
| 203 } | 202 } |
| 204 } | 203 } |
| 205 | 204 |
| 206 bool allThreadsParked() { return m_parkedAllThreads; } | 205 bool allThreadsParked() { return m_parkedAllThreads; } |
| 207 | 206 |
| 208 ~TestGCScope() | 207 ~TestGCScope() |
| 209 { | 208 { |
| 210 // Only clean up if we parked all threads, in which case the GC happened | 209 // Only clean up if we parked all threads, in which case the GC happened |
| 211 // and we need to resume the other threads. | 210 // and we need to resume the other threads. |
| 212 if (LIKELY(m_parkedAllThreads)) { | 211 if (LIKELY(m_parkedAllThreads)) { |
| 213 m_state->leaveGC(); | |
| 214 Heap::leaveGC(); | 212 Heap::leaveGC(); |
| 215 ThreadState::resumeThreads(); | 213 ThreadState::resumeThreads(); |
| 216 } | 214 } |
| 217 } | 215 } |
| 218 | 216 |
| 219 private: | 217 private: |
| 220 ThreadState* m_state; | 218 ThreadState* m_state; |
| 221 ThreadState::SafePointScope m_safePointScope; | 219 ThreadState::SafePointScope m_safePointScope; |
| 222 bool m_parkedAllThreads; // False if we fail to park all threads | 220 bool m_parkedAllThreads; // False if we fail to park all threads |
| 223 }; | 221 }; |
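Note on this hunk: TestGCScope still does the global Heap::enterGC()/leaveGC() bookkeeping and parks/resumes the other threads, but the per-thread m_state->enterGC()/leaveGC() calls are gone; the test bodies further down now bracket the marking work themselves with Heap::preGC() and Heap::postGC(). A minimal usage sketch of the new pattern, mirroring those test bodies (the visitor and the pointer-check calls stand in for whatever the individual test does inside the scope):

    {
        TestGCScope scope(ThreadState::HeapPointersOnStack);
        // Fail the test if we could not park all threads.
        EXPECT_TRUE(scope.allThreadsParked());

        Heap::preGC();                        // was Heap::prepareForGC() before this change
        Heap::flushHeapDoesNotContainCache();

        // ... conservative pointer checks via Heap::checkAndMarkPointer(&visitor, address) ...

        Heap::postGC();                       // new: undo preGC() before the scope resumes the threads
    }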
| (...skipping 3122 matching lines...) |
| 3346 // This is a low-level test where we call checkAndMarkPointer. This method | 3344 // This is a low-level test where we call checkAndMarkPointer. This method |
| 3347 // causes the object start bitmap to be computed which requires the heap | 3345 // causes the object start bitmap to be computed which requires the heap |
| 3348 // to be in a consistent state (e.g. the free allocation area must be put | 3346 // to be in a consistent state (e.g. the free allocation area must be put |
| 3349 // into a free list header). However when we call makeConsistentForSweeping it | 3347 // into a free list header). However when we call makeConsistentForSweeping it |
| 3350 // also clears out the freelists so we have to rebuild those before trying | 3348 // also clears out the freelists so we have to rebuild those before trying |
| 3351 // to allocate anything again. We do this by forcing a GC after doing the | 3349 // to allocate anything again. We do this by forcing a GC after doing the |
| 3352 // checkAndMarkPointer tests. | 3350 // checkAndMarkPointer tests. |
| 3353 { | 3351 { |
| 3354 TestGCScope scope(ThreadState::HeapPointersOnStack); | 3352 TestGCScope scope(ThreadState::HeapPointersOnStack); |
| 3355 EXPECT_TRUE(scope.allThreadsParked()); // Fail the test if we could not park all threads. | 3353 EXPECT_TRUE(scope.allThreadsParked()); // Fail the test if we could not park all threads. |
| 3356 Heap::prepareForGC(); | 3354 Heap::preGC(); |
| 3357 Heap::flushHeapDoesNotContainCache(); | 3355 Heap::flushHeapDoesNotContainCache(); |
| 3358 for (size_t i = 0; i < objectAddresses.size(); i++) { | 3356 for (size_t i = 0; i < objectAddresses.size(); i++) { |
| 3359 EXPECT_TRUE(Heap::checkAndMarkPointer(&visitor, objectAddresses[i])); | 3357 EXPECT_TRUE(Heap::checkAndMarkPointer(&visitor, objectAddresses[i])); |
| 3360 EXPECT_TRUE(Heap::checkAndMarkPointer(&visitor, endAddresses[i])); | 3358 EXPECT_TRUE(Heap::checkAndMarkPointer(&visitor, endAddresses[i])); |
| 3361 } | 3359 } |
| 3362 EXPECT_EQ(objectAddresses.size() * 2, visitor.count()); | 3360 EXPECT_EQ(objectAddresses.size() * 2, visitor.count()); |
| 3363 visitor.reset(); | 3361 visitor.reset(); |
| 3364 EXPECT_TRUE(Heap::checkAndMarkPointer(&visitor, largeObjectAddress)); | 3362 EXPECT_TRUE(Heap::checkAndMarkPointer(&visitor, largeObjectAddress)); |
| 3365 EXPECT_TRUE(Heap::checkAndMarkPointer(&visitor, largeObjectEndAddress)); | 3363 EXPECT_TRUE(Heap::checkAndMarkPointer(&visitor, largeObjectEndAddress)); |
| 3366 EXPECT_EQ(2ul, visitor.count()); | 3364 EXPECT_EQ(2ul, visitor.count()); |
| 3367 visitor.reset(); | 3365 visitor.reset(); |
| | 3366 Heap::postGC(); |
| 3368 } | 3367 } |
| 3369 // This forces a GC without stack scanning which results in the objects | 3368 // This forces a GC without stack scanning which results in the objects |
| 3370 // being collected. This will also rebuild the above mentioned freelists, | 3369 // being collected. This will also rebuild the above mentioned freelists, |
| 3371 // however we don't rely on that below since we don't have any allocations. | 3370 // however we don't rely on that below since we don't have any allocations. |
| 3372 clearOutOldGarbage(); | 3371 clearOutOldGarbage(); |
| 3373 { | 3372 { |
| 3374 TestGCScope scope(ThreadState::HeapPointersOnStack); | 3373 TestGCScope scope(ThreadState::HeapPointersOnStack); |
| 3375 EXPECT_TRUE(scope.allThreadsParked()); | 3374 EXPECT_TRUE(scope.allThreadsParked()); |
| 3376 Heap::prepareForGC(); | 3375 Heap::preGC(); |
| 3377 Heap::flushHeapDoesNotContainCache(); | 3376 Heap::flushHeapDoesNotContainCache(); |
| 3378 for (size_t i = 0; i < objectAddresses.size(); i++) { | 3377 for (size_t i = 0; i < objectAddresses.size(); i++) { |
| 3379 // We would like to assert that checkAndMarkPointer returned false | 3378 // We would like to assert that checkAndMarkPointer returned false |
| 3380 // here because the pointers no longer point into a valid object | 3379 // here because the pointers no longer point into a valid object |
| 3381 // (it's been freed by the GCs). But checkAndMarkPointer will return | 3380 // (it's been freed by the GCs). But checkAndMarkPointer will return |
| 3382 // true for any pointer that points into a heap page, regardless of | 3381 // true for any pointer that points into a heap page, regardless of |
| 3383 // whether it points at a valid object (this ensures the | 3382 // whether it points at a valid object (this ensures the |
| 3384 // correctness of the page-based on-heap address caches), so we | 3383 // correctness of the page-based on-heap address caches), so we |
| 3385 // can't make that assert. | 3384 // can't make that assert. |
| 3386 Heap::checkAndMarkPointer(&visitor, objectAddresses[i]); | 3385 Heap::checkAndMarkPointer(&visitor, objectAddresses[i]); |
| 3387 Heap::checkAndMarkPointer(&visitor, endAddresses[i]); | 3386 Heap::checkAndMarkPointer(&visitor, endAddresses[i]); |
| 3388 } | 3387 } |
| 3389 EXPECT_EQ(0ul, visitor.count()); | 3388 EXPECT_EQ(0ul, visitor.count()); |
| 3390 Heap::checkAndMarkPointer(&visitor, largeObjectAddress); | 3389 Heap::checkAndMarkPointer(&visitor, largeObjectAddress); |
| 3391 Heap::checkAndMarkPointer(&visitor, largeObjectEndAddress); | 3390 Heap::checkAndMarkPointer(&visitor, largeObjectEndAddress); |
| 3392 EXPECT_EQ(0ul, visitor.count()); | 3391 EXPECT_EQ(0ul, visitor.count()); |
| | 3392 Heap::postGC(); |
| 3393 } | 3393 } |
| 3394 // This round of GC is important to make sure that the object start | 3394 // This round of GC is important to make sure that the object start |
| 3395 // bitmaps are cleared out and that the free lists are rebuilt. | 3395 // bitmaps are cleared out and that the free lists are rebuilt. |
| 3396 clearOutOldGarbage(); | 3396 clearOutOldGarbage(); |
| 3397 } | 3397 } |
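The comment in the second block above is the subtle part: Heap::checkAndMarkPointer() reports true for any pointer that lands inside a heap page, whether or not that address still holds a live object, so for the freed objects the test ignores the return value and only asserts on the visitor's count. A condensed restatement of what the two blocks check (sketch only; it assumes the same preGC()/postGC() bracketing and the same visitor as above):

    // First TestGCScope block: the objects are alive, so both the result and
    // the visitor's mark count can be asserted.
    EXPECT_TRUE(Heap::checkAndMarkPointer(&visitor, objectAddresses[0]));
    visitor.reset();

    // clearOutOldGarbage() then collects the objects.

    // Second block: the address is still inside a heap page, so the call may
    // return true even though the object is gone; only the count is checked.
    Heap::checkAndMarkPointer(&visitor, objectAddresses[0]);
    EXPECT_EQ(0ul, visitor.count());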
| 3398 | 3398 |
| 3399 TEST(HeapTest, VisitOffHeapCollections) | 3399 TEST(HeapTest, VisitOffHeapCollections) |
| 3400 { | 3400 { |
| 3401 clearOutOldGarbage(); | 3401 clearOutOldGarbage(); |
| 3402 IntWrapper::s_destructorCalls = 0; | 3402 IntWrapper::s_destructorCalls = 0; |
| (...skipping 1823 matching lines...) |
| 5226 TEST(HeapTest, NonNodeAllocatingNodeInDestructor) | 5226 TEST(HeapTest, NonNodeAllocatingNodeInDestructor) |
| 5227 { | 5227 { |
| 5228 new NonNodeAllocatingNodeInDestructor(); | 5228 new NonNodeAllocatingNodeInDestructor(); |
| 5229 Heap::collectGarbage(ThreadState::NoHeapPointersOnStack); | 5229 Heap::collectGarbage(ThreadState::NoHeapPointersOnStack); |
| 5230 EXPECT_EQ(10, (*NonNodeAllocatingNodeInDestructor::s_node)->value()); | 5230 EXPECT_EQ(10, (*NonNodeAllocatingNodeInDestructor::s_node)->value()); |
| 5231 delete NonNodeAllocatingNodeInDestructor::s_node; | 5231 delete NonNodeAllocatingNodeInDestructor::s_node; |
| 5232 NonNodeAllocatingNodeInDestructor::s_node = 0; | 5232 NonNodeAllocatingNodeInDestructor::s_node = 0; |
| 5233 } | 5233 } |
| 5234 | 5234 |
| 5235 } // namespace blink | 5235 } // namespace blink |