Chromium Code Reviews

Unified Diff: syzygy/agent/asan/heap_managers/block_heap_manager.cc

Issue 2527533003: Make SyzyAsan support allocations > 1GB (Closed)
Patch Set: Do an unguarded alloc if the size > 2GB (created 4 years ago)
 // Copyright 2014 Google Inc. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 //     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
(...skipping 48 matching lines...)
   shift = (n > 0xF) << 2;
   n >>= shift;
   r |= shift;
   shift = (n > 0x3) << 1;
   n >>= shift;
   r |= shift;
   r |= (n >> 1);
   return r;
 }

+// Try to do an unguarded allocation.
+// @param heap_interface The heap that should serve the allocation.
+// @param shadow The shadow memory.
+// @param bytes The size of the allocation.
+// @returns a pointer to the allocation on success, nullptr otherwise.
+void* DoUnguardedAllocation(BlockHeapInterface* heap_interface,
+                            Shadow* shadow,
+                            uint32_t bytes) {
+  void* alloc = heap_interface->Allocate(bytes);
+  if ((heap_interface->GetHeapFeatures() &
+       HeapInterface::kHeapReportsReservations) != 0) {
+    shadow->Unpoison(alloc, bytes);
+  }
+  return alloc;
+}
+
 }  // namespace

 BlockHeapManager::BlockHeapManager(Shadow* shadow,
                                    StackCaptureCache* stack_cache,
                                    MemoryNotifierInterface* memory_notifier)
     : shadow_(shadow),
       stack_cache_(stack_cache),
       memory_notifier_(memory_notifier),
       initialized_(false),
       process_heap_(nullptr),
(...skipping 93 matching lines...)
   return true;
 }

 void* BlockHeapManager::Allocate(HeapId heap_id, uint32_t bytes) {
   DCHECK(initialized_);
   DCHECK(IsValidHeapId(heap_id, false));

   // Some allocations can pass through without instrumentation.
   if (parameters_.allocation_guard_rate < 1.0 &&
       base::RandDouble() >= parameters_.allocation_guard_rate) {
-    BlockHeapInterface* heap = GetHeapFromId(heap_id);
-    void* alloc = heap->Allocate(bytes);
-    if ((heap->GetHeapFeatures() &
-         HeapInterface::kHeapReportsReservations) != 0) {
-      shadow_->Unpoison(alloc, bytes);
-    }
-    return alloc;
+    return DoUnguardedAllocation(GetHeapFromId(heap_id), shadow_, bytes);
   }

   // Capture the current stack. InitFromStack is inlined to preserve the
   // greatest number of stack frames.
   common::StackCapture stack;
   stack.InitFromStack();

   // Build the set of heaps that will be used to satisfy the allocation. This
   // is a stack of heaps, and they will be tried in the reverse order they are
   // inserted.
(...skipping 20 matching lines...)
         bytes,
         0,
         parameters_.trailer_padding_size + sizeof(BlockTrailer),
         &block_layout);
     if (alloc != nullptr) {
       heap_id = heaps[i];
       break;
     }
   }

-  // The allocation can fail if we're out of memory.
+  // The allocation might fail because its size exceed the maximum size that
Sigurður Ásgeirsson 2016/11/25 18:16:11 It would IMHO make much more sense, and be much si
Sébastien Marchand 2016/11/25 21:14:57 I'm not sure, we had some CF test cases who alloca
+  // we can represent in the BlockHeader structure, try to do an unguarded
+  // allocation.
   if (alloc == nullptr)
-    return nullptr;
+    return DoUnguardedAllocation(GetHeapFromId(heap_id), shadow_, bytes);

   DCHECK_NE(static_cast<void*>(nullptr), alloc);
   DCHECK_EQ(0u, reinterpret_cast<size_t>(alloc) % kShadowRatio);
   BlockInfo block = {};
   BlockInitialize(block_layout, alloc, &block);

   // Poison the redzones in the shadow memory as early as possible.
   shadow_->PoisonAllocatedBlock(block);

   block.header->alloc_stack = stack_cache_->SaveStackTrace(stack);
(...skipping 870 matching lines...)
   if (trailer_has_valid_heap_id)
     return block_info->trailer->heap_id;

   // Unfortunately, there's no way to know which heap this block belongs to.
   return 0;
 }

 }  // namespace heap_managers
 }  // namespace asan
 }  // namespace agent
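
For readers skimming the change, here is a minimal, self-contained sketch of the fallback pattern this patch introduces. It is not the Syzygy implementation: kMaxGuardedSize, GuardedAllocate and UnguardedAllocate are hypothetical stand-ins (the real size limit comes from the block-layout step failing, and the real unguarded path is DoUnguardedAllocation() in the diff above); the 2GB figure is taken only from the patch title.

// Sketch only: a guarded (redzoned) allocation is attempted first; if it
// cannot be laid out, the request is served by a plain unguarded allocation
// instead of returning nullptr.
#include <cstdint>
#include <cstdlib>

namespace sketch {

constexpr uint64_t kMaxGuardedSize = 1ULL << 31;  // ~2GB, per the patch title.

// Plain allocation: no redzones and no shadow-memory bookkeeping.
void* UnguardedAllocate(uint32_t bytes) {
  return ::malloc(bytes);
}

// Stand-in for the guarded path: fails when the requested size cannot be
// represented in the block metadata.
void* GuardedAllocate(uint32_t bytes) {
  if (static_cast<uint64_t>(bytes) >= kMaxGuardedSize)
    return nullptr;
  return ::malloc(bytes);  // The real code also lays out header/trailer redzones.
}

void* Allocate(uint32_t bytes) {
  void* alloc = GuardedAllocate(bytes);
  if (alloc == nullptr) {
    // New behaviour from this patch: oversized requests are served unguarded
    // rather than failing outright (the old code returned nullptr here).
    alloc = UnguardedAllocate(bytes);
  }
  return alloc;
}

}  // namespace sketch

The trade-off is that an unguarded allocation gets no redzones and therefore no ASan-style bounds checking, while requests larger than the block metadata can represent no longer fail outright.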
