Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(21)

Side by Side Diff: base/allocator/allocator_shim_unittest.cc

Issue 2697123007: base: Add support for malloc zones to the allocator shim (Closed)
Patch Set: Windows compile error. Created 3 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/allocator/allocator_shim.h" 5 #include "base/allocator/allocator_shim.h"
6 6
7 #include <stdlib.h> 7 #include <stdlib.h>
8 #include <string.h> 8 #include <string.h>
9 9
10 #include <memory> 10 #include <memory>
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
52 52
53 class AllocatorShimTest : public testing::Test { 53 class AllocatorShimTest : public testing::Test {
54 public: 54 public:
55 static const size_t kMaxSizeTracked = 8192; 55 static const size_t kMaxSizeTracked = 8192;
56 AllocatorShimTest() : testing::Test() {} 56 AllocatorShimTest() : testing::Test() {}
57 57
58 static size_t Hash(const void* ptr) { 58 static size_t Hash(const void* ptr) {
59 return reinterpret_cast<uintptr_t>(ptr) % kMaxSizeTracked; 59 return reinterpret_cast<uintptr_t>(ptr) % kMaxSizeTracked;
60 } 60 }
61 61
62 static void* MockAlloc(const AllocatorDispatch* self, size_t size) { 62 static void* MockAlloc(const AllocatorDispatch* self,
63 size_t size,
64 void* context) {
63 if (instance_ && size < kMaxSizeTracked) 65 if (instance_ && size < kMaxSizeTracked)
64 ++(instance_->allocs_intercepted_by_size[size]); 66 ++(instance_->allocs_intercepted_by_size[size]);
65 return self->next->alloc_function(self->next, size); 67 return self->next->alloc_function(self->next, size, context);
66 } 68 }
67 69
68 static void* MockAllocZeroInit(const AllocatorDispatch* self, 70 static void* MockAllocZeroInit(const AllocatorDispatch* self,
69 size_t n, 71 size_t n,
70 size_t size) { 72 size_t size,
73 void* context) {
71 const size_t real_size = n * size; 74 const size_t real_size = n * size;
72 if (instance_ && real_size < kMaxSizeTracked) 75 if (instance_ && real_size < kMaxSizeTracked)
73 ++(instance_->zero_allocs_intercepted_by_size[real_size]); 76 ++(instance_->zero_allocs_intercepted_by_size[real_size]);
74 return self->next->alloc_zero_initialized_function(self->next, n, size); 77 return self->next->alloc_zero_initialized_function(self->next, n, size,
78 context);
75 } 79 }
76 80
77 static void* MockAllocAligned(const AllocatorDispatch* self, 81 static void* MockAllocAligned(const AllocatorDispatch* self,
78 size_t alignment, 82 size_t alignment,
79 size_t size) { 83 size_t size,
84 void* context) {
80 if (instance_) { 85 if (instance_) {
81 if (size < kMaxSizeTracked) 86 if (size < kMaxSizeTracked)
82 ++(instance_->aligned_allocs_intercepted_by_size[size]); 87 ++(instance_->aligned_allocs_intercepted_by_size[size]);
83 if (alignment < kMaxSizeTracked) 88 if (alignment < kMaxSizeTracked)
84 ++(instance_->aligned_allocs_intercepted_by_alignment[alignment]); 89 ++(instance_->aligned_allocs_intercepted_by_alignment[alignment]);
85 } 90 }
86 return self->next->alloc_aligned_function(self->next, alignment, size); 91 return self->next->alloc_aligned_function(self->next, alignment, size,
92 context);
87 } 93 }
88 94
89 static void* MockRealloc(const AllocatorDispatch* self, 95 static void* MockRealloc(const AllocatorDispatch* self,
90 void* address, 96 void* address,
91 size_t size) { 97 size_t size,
98 void* context) {
92 if (instance_) { 99 if (instance_) {
 93 // Size 0xFEED is a special sentinel for the NewHandlerConcurrency test. 100 // Size 0xFEED is a special sentinel for the NewHandlerConcurrency test.
94 // Hitting it for the first time will cause a failure, causing the 101 // Hitting it for the first time will cause a failure, causing the
95 // invocation of the std::new_handler. 102 // invocation of the std::new_handler.
96 if (size == 0xFEED) { 103 if (size == 0xFEED) {
97 if (!instance_->did_fail_realloc_0xfeed_once->Get()) { 104 if (!instance_->did_fail_realloc_0xfeed_once->Get()) {
98 instance_->did_fail_realloc_0xfeed_once->Set(true); 105 instance_->did_fail_realloc_0xfeed_once->Set(true);
99 return nullptr; 106 return nullptr;
100 } else { 107 } else {
101 return address; 108 return address;
102 } 109 }
103 } 110 }
104 111
105 if (size < kMaxSizeTracked) 112 if (size < kMaxSizeTracked)
106 ++(instance_->reallocs_intercepted_by_size[size]); 113 ++(instance_->reallocs_intercepted_by_size[size]);
107 ++instance_->reallocs_intercepted_by_addr[Hash(address)]; 114 ++instance_->reallocs_intercepted_by_addr[Hash(address)];
108 } 115 }
109 return self->next->realloc_function(self->next, address, size); 116 return self->next->realloc_function(self->next, address, size, context);
110 } 117 }
111 118
112 static void MockFree(const AllocatorDispatch* self, void* address) { 119 static void MockFree(const AllocatorDispatch* self,
120 void* address,
121 void* context) {
113 if (instance_) { 122 if (instance_) {
114 ++instance_->frees_intercepted_by_addr[Hash(address)]; 123 ++instance_->frees_intercepted_by_addr[Hash(address)];
115 } 124 }
116 self->next->free_function(self->next, address); 125 self->next->free_function(self->next, address, context);
117 } 126 }
118 127
119 static size_t MockGetSizeEstimate(const AllocatorDispatch* self, 128 static size_t MockGetSizeEstimate(const AllocatorDispatch* self,
120 void* address) { 129 void* address,
121 return self->next->get_size_estimate_function(self->next, address); 130 void* context) {
131 return self->next->get_size_estimate_function(self->next, address, context);
122 } 132 }
123 133
124 static unsigned MockBatchMalloc(const AllocatorDispatch* self, 134 static unsigned MockBatchMalloc(const AllocatorDispatch* self,
125 size_t size, 135 size_t size,
126 void** results, 136 void** results,
127 unsigned num_requested) { 137 unsigned num_requested,
138 void* context) {
128 if (instance_) { 139 if (instance_) {
129 instance_->batch_mallocs_intercepted_by_size[size] = 140 instance_->batch_mallocs_intercepted_by_size[size] =
130 instance_->batch_mallocs_intercepted_by_size[size] + num_requested; 141 instance_->batch_mallocs_intercepted_by_size[size] + num_requested;
131 } 142 }
132 return self->next->batch_malloc_function(self->next, size, results, 143 return self->next->batch_malloc_function(self->next, size, results,
133 num_requested); 144 num_requested, context);
134 } 145 }
135 146
136 static void MockBatchFree(const AllocatorDispatch* self, 147 static void MockBatchFree(const AllocatorDispatch* self,
137 void** to_be_freed, 148 void** to_be_freed,
138 unsigned num_to_be_freed) { 149 unsigned num_to_be_freed,
150 void* context) {
139 if (instance_) { 151 if (instance_) {
140 for (unsigned i = 0; i < num_to_be_freed; ++i) { 152 for (unsigned i = 0; i < num_to_be_freed; ++i) {
141 ++instance_->batch_frees_intercepted_by_addr[Hash(to_be_freed[i])]; 153 ++instance_->batch_frees_intercepted_by_addr[Hash(to_be_freed[i])];
142 } 154 }
143 } 155 }
144 self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed); 156 self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
157 context);
145 } 158 }
146 159
147 static void MockFreeDefiniteSize(const AllocatorDispatch* self, 160 static void MockFreeDefiniteSize(const AllocatorDispatch* self,
148 void* ptr, 161 void* ptr,
149 size_t size) { 162 size_t size,
163 void* context) {
150 if (instance_) { 164 if (instance_) {
151 ++instance_->frees_intercepted_by_addr[Hash(ptr)]; 165 ++instance_->frees_intercepted_by_addr[Hash(ptr)];
152 ++instance_->free_definite_sizes_intercepted_by_size[size]; 166 ++instance_->free_definite_sizes_intercepted_by_size[size];
153 } 167 }
154 self->next->free_definite_size_function(self->next, ptr, size); 168 self->next->free_definite_size_function(self->next, ptr, size, context);
155 } 169 }
156 170
157 static void NewHandler() { 171 static void NewHandler() {
158 if (!instance_) 172 if (!instance_)
159 return; 173 return;
160 subtle::Barrier_AtomicIncrement(&instance_->num_new_handler_calls, 1); 174 subtle::Barrier_AtomicIncrement(&instance_->num_new_handler_calls, 1);
161 } 175 }
162 176
163 int32_t GetNumberOfNewHandlerCalls() { 177 int32_t GetNumberOfNewHandlerCalls() {
164 return subtle::Acquire_Load(&instance_->num_new_handler_calls); 178 return subtle::Acquire_Load(&instance_->num_new_handler_calls);
(...skipping 268 matching lines...) Expand 10 before | Expand all | Expand 10 after
433 447
434 #if defined(OS_WIN) && BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) 448 #if defined(OS_WIN) && BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
435 TEST_F(AllocatorShimTest, ShimReplacesCRTHeapWhenEnabled) { 449 TEST_F(AllocatorShimTest, ShimReplacesCRTHeapWhenEnabled) {
436 ASSERT_NE(::GetProcessHeap(), reinterpret_cast<HANDLE>(_get_heap_handle())); 450 ASSERT_NE(::GetProcessHeap(), reinterpret_cast<HANDLE>(_get_heap_handle()));
437 } 451 }
438 #endif // defined(OS_WIN) && BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) 452 #endif // defined(OS_WIN) && BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
439 453
440 } // namespace 454 } // namespace
441 } // namespace allocator 455 } // namespace allocator
442 } // namespace base 456 } // namespace base
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698