Index: base/trace_event/heap_profiler_allocation_register_unittest.cc
diff --git a/base/trace_event/heap_profiler_allocation_register_unittest.cc b/base/trace_event/heap_profiler_allocation_register_unittest.cc
index 7eee61aa35e04c75cea63a18828eba85c54c0e28..226c04b55ce115acd5b0010e1855237b6c71fa54 100644
--- a/base/trace_event/heap_profiler_allocation_register_unittest.cc
+++ b/base/trace_event/heap_profiler_allocation_register_unittest.cc
@@ -245,10 +245,8 @@ TEST_F(AllocationRegisterTest, ChangeContextAfterInsertion) {
   EXPECT_FALSE(reg.Get(reinterpret_cast<void*>(19), &a));
 }
 
-// Check that the process aborts due to hitting the guard page when inserting
-// too many elements.
-#if GTEST_HAS_DEATH_TEST
-TEST_F(AllocationRegisterTest, OverflowDeathTest) {
+// Check that the table handles overflows gracefully.
+TEST_F(AllocationRegisterTest, OverflowTest) {
   const size_t allocation_capacity = GetAllocationCapacityPerPage();
   AllocationRegister reg(allocation_capacity, kBacktraceCapacity);
   AllocationContext ctx;
@@ -260,9 +258,8 @@ TEST_F(AllocationRegisterTest, OverflowDeathTest) {
   }
 
   // Adding just one extra element should cause overflow.
-  ASSERT_DEATH(reg.Insert(reinterpret_cast<void*>(i + 1), 1, ctx), "");
+  ASSERT_FALSE(reg.Insert(reinterpret_cast<void*>(i + 1), 1, ctx));
 }
-#endif
 
 }  // namespace trace_event
 }  // namespace base
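Note on the change above: swapping ASSERT_DEATH for ASSERT_FALSE implies that AllocationRegister::Insert now reports overflow through its bool return value instead of crashing into the guard page, which is also why the GTEST_HAS_DEATH_TEST guard can be dropped. Below is a minimal standalone sketch of that bounded-insert contract; it is not the Chromium implementation, and the FixedTable type and its members are hypothetical, assumed only to illustrate an Insert() that returns false once capacity is exhausted.

// Hypothetical sketch (not Chromium code): a fixed-capacity table whose
// Insert() fails gracefully by returning false when the table is full.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

class FixedTable {
 public:
  explicit FixedTable(size_t capacity) : capacity_(capacity) {}

  // Mirrors the bool-returning Insert the updated test asserts on: rather
  // than aborting on overflow, report failure and let the caller decide.
  bool Insert(const void* address, size_t size) {
    if (entries_.size() >= capacity_)
      return false;  // Graceful overflow path.
    entries_.push_back({address, size});
    return true;
  }

 private:
  struct Entry {
    const void* address;
    size_t size;
  };
  const size_t capacity_;
  std::vector<Entry> entries_;
};

int main() {
  const size_t capacity = 4;
  FixedTable table(capacity);

  // Filling the table to capacity succeeds on every insert.
  for (uintptr_t i = 1; i <= capacity; i++)
    std::cout << table.Insert(reinterpret_cast<void*>(i), 1) << "\n";  // 1

  // One extra element overflows; Insert() reports it instead of dying.
  std::cout << table.Insert(reinterpret_cast<void*>(capacity + 1), 1)
            << "\n";  // 0
  return 0;
}

Returning false keeps the overflow observable to callers (and to tests) without taking down the process, which is what lets the rewritten test run on configurations where death tests are unavailable.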