Chromium Code Reviews

Unified Diff: src/heap/spaces.cc

Issue 1341293002: [heap] Extend mutex guards for CodeRange. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 3 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/heap/spaces.h"
 
 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/heap/mark-compact.h"
(...skipping 145 matching lines...)
 
 bool CodeRange::GetNextAllocationBlock(size_t requested) {
   for (current_allocation_block_index_++;
        current_allocation_block_index_ < allocation_list_.length();
        current_allocation_block_index_++) {
     if (requested <= allocation_list_[current_allocation_block_index_].size) {
       return true;  // Found a large enough allocation block.
     }
   }
 
-  {
-    base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_);
-
-    // Sort and merge the free blocks on the free list and the allocation list.
-    free_list_.AddAll(allocation_list_);
-    allocation_list_.Clear();
-    free_list_.Sort(&CompareFreeBlockAddress);
-    for (int i = 0; i < free_list_.length();) {
-      FreeBlock merged = free_list_[i];
-      i++;
-      // Add adjacent free blocks to the current merged block.
-      while (i < free_list_.length() &&
-             free_list_[i].start == merged.start + merged.size) {
-        merged.size += free_list_[i].size;
-        i++;
-      }
-      if (merged.size > 0) {
-        allocation_list_.Add(merged);
-      }
-    }
-    free_list_.Clear();
-  }
+  // Sort and merge the free blocks on the free list and the allocation list.
+  free_list_.AddAll(allocation_list_);
+  allocation_list_.Clear();
+  free_list_.Sort(&CompareFreeBlockAddress);
+  for (int i = 0; i < free_list_.length();) {
+    FreeBlock merged = free_list_[i];
+    i++;
+    // Add adjacent free blocks to the current merged block.
+    while (i < free_list_.length() &&
+           free_list_[i].start == merged.start + merged.size) {
+      merged.size += free_list_[i].size;
+      i++;
+    }
+    if (merged.size > 0) {
+      allocation_list_.Add(merged);
+    }
+  }
+  free_list_.Clear();
 
   for (current_allocation_block_index_ = 0;
        current_allocation_block_index_ < allocation_list_.length();
        current_allocation_block_index_++) {
     if (requested <= allocation_list_[current_allocation_block_index_].size) {
       return true;  // Found a large enough allocation block.
     }
   }
   current_allocation_block_index_ = 0;
   // Code range is full or too fragmented.
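
Note: the sort-and-merge pass itself is unchanged by this patch; the hunk only removes the inner lock and re-indents. Read in isolation, the pass looks like the following minimal sketch, which substitutes a hypothetical FreeBlock struct and std::vector for V8's List type:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for V8's FreeBlock: a start address plus a length.
struct FreeBlock {
  uintptr_t start;
  size_t size;
};

// Mirror of the loop above: sort by start address (the role played by
// CompareFreeBlockAddress), then absorb every block that begins exactly
// where the current merged block ends.
std::vector<FreeBlock> Coalesce(std::vector<FreeBlock> blocks) {
  std::sort(blocks.begin(), blocks.end(),
            [](const FreeBlock& a, const FreeBlock& b) {
              return a.start < b.start;
            });
  std::vector<FreeBlock> merged_list;
  for (size_t i = 0; i < blocks.size();) {
    FreeBlock merged = blocks[i];
    i++;
    while (i < blocks.size() &&
           blocks[i].start == merged.start + merged.size) {
      merged.size += blocks[i].size;  // Adjacent: extend the merged block.
      i++;
    }
    if (merged.size > 0) merged_list.push_back(merged);
  }
  return merged_list;
}

int main() {
  // Three touching 4 KB blocks, deliberately out of order.
  std::vector<FreeBlock> free_list = {
      {0x3000, 0x1000}, {0x1000, 0x1000}, {0x2000, 0x1000}};
  for (const FreeBlock& b : Coalesce(free_list)) {
    // Prints a single block: start=1000 size=3000.
    std::printf("start=%zx size=%zx\n", static_cast<size_t>(b.start), b.size);
  }
  return 0;
}

Sorting by start address first guarantees that any two mergeable blocks end up adjacent, so a single linear pass suffices.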
(...skipping 31 matching lines...)
 }
 
 
 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
   return code_range_->Uncommit(start, length);
 }
 
 
 void CodeRange::FreeRawMemory(Address address, size_t length) {
   DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
-  base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_);
+  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
   free_list_.Add(FreeBlock(address, length));
   code_range_->Uncommit(address, length);
 }
 
 
 void CodeRange::TearDown() {
   delete code_range_;  // Frees all memory in the virtual memory range.
   code_range_ = NULL;
-  base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_);
+  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
   free_list_.Free();
   allocation_list_.Free();
 }
 
 
 bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
+  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
   DCHECK(allocation_list_.length() == 0 ||
          current_allocation_block_index_ < allocation_list_.length());
   if (allocation_list_.length() == 0 ||
       requested_size > allocation_list_[current_allocation_block_index_].size) {
     // Find an allocation block large enough.
     if (!GetNextAllocationBlock(requested_size)) return false;
   }
   // Commit the requested memory at the start of the current allocation block.
   size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
   *block = allocation_list_[current_allocation_block_index_];
   // Don't leave a small free block, useless for a large object or chunk.
   if (aligned_requested < (block->size - Page::kPageSize)) {
     block->size = aligned_requested;
   }
   DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
   allocation_list_[current_allocation_block_index_].start += block->size;
   allocation_list_[current_allocation_block_index_].size -= block->size;
   return true;
 }
 
 
 void CodeRange::ReleaseBlock(const FreeBlock* block) {
-  base::LockGuard<base::Mutex> free_list_lock_guard(&free_list_mutex_);
+  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
   free_list_.Add(*block);
 }
 
 
 void CodeRange::ReserveEmergencyBlock() {
   const size_t requested_size = MemoryAllocator::CodePageAreaSize();
   if (emergency_block_.size == 0) {
     ReserveBlock(requested_size, &emergency_block_);
   } else {
     DCHECK(emergency_block_.size >= requested_size);
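
Note: the substance of the patch is in the hunks above. The narrow free_list_mutex_ becomes a wider code_range_mutex_ held across FreeRawMemory, TearDown, ReserveBlock, and ReleaseBlock, and GetNextAllocationBlock relies on its caller's lock instead of taking its own. A minimal sketch of that pattern, with std::mutex standing in for base::Mutex and the bodies elided:

#include <mutex>

class CodeRangeSketch {
 public:
  // One guard now covers the block search, the free-list merge, and the
  // allocation-list update as a single critical section.
  bool ReserveBlock() {
    std::lock_guard<std::mutex> guard(code_range_mutex_);
    // ... try the current allocation block first, as in the real code ...
    return GetNextAllocationBlock();
  }

 private:
  // Precondition: the caller holds code_range_mutex_. Keeping an inner
  // guard here (as the old code had with free_list_mutex_) would now mean
  // re-acquiring the same non-recursive mutex, which deadlocks.
  bool GetNextAllocationBlock() {
    // ... merge free_list_ into allocation_list_ under the caller's lock ...
    return false;
  }

  std::mutex code_range_mutex_;
};

int main() {
  CodeRangeSketch range;
  return range.ReserveBlock() ? 0 : 1;
}

Dropping the inner guard is not optional once the caller locks: that is why the patch removes it rather than merely renaming the mutex.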
(...skipping 2897 matching lines...)
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }
 
 #endif  // DEBUG
 }  // namespace internal
 }  // namespace v8
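
Note: one detail in the ReserveBlock hunk above is easy to miss: the request is rounded up to MemoryChunk::kAlignment, and the current block is split only when the leftover would exceed Page::kPageSize ("Don't leave a small free block"). A worked example, with illustrative constants rather than V8's actual values:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t kAlignment = 1u << 20;  // Illustrative; not V8's real constant.
  const size_t kPageSize = 1u << 20;   // Illustrative; not V8's real constant.
  const size_t requested_size = 300 * 1024;  // Hypothetical 300 KB request.
  const size_t block_size = 4u << 20;        // Hypothetical 4 MB free block.

  // RoundUp(requested_size, MemoryChunk::kAlignment), for a power-of-two
  // alignment, is equivalent to this bitmask form.
  size_t aligned_requested =
      (requested_size + kAlignment - 1) & ~(kAlignment - 1);

  // Split the block only if more than a page would remain; otherwise hand
  // out the whole block so no useless sliver is left on the list.
  size_t taken = (aligned_requested < block_size - kPageSize)
                     ? aligned_requested
                     : block_size;

  // Here: 300 KB rounds up to 1 MB; 4 MB - 1 MB leaves 3 MB (> one page),
  // so only 1 MB is taken and 3 MB stays available for later requests.
  std::printf("aligned = %zu, taken = %zu\n", aligned_requested, taken);
  return 0;
}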
