Chromium Code Reviews

Issue 11566011: Use MemoryChunk-based allocation for deoptimization entry code (Closed)
Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. | 
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without | 
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are | 
| 4 // met: | 4 // met: | 
| 5 // | 5 // | 
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright | 
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. | 
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above | 
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following | 
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided | 
| (...skipping 189 matching lines...) | |
| 200 if (requested <= allocation_list_[current_allocation_block_index_].size) { | 200 if (requested <= allocation_list_[current_allocation_block_index_].size) { | 
| 201 return; // Found a large enough allocation block. | 201 return; // Found a large enough allocation block. | 
| 202 } | 202 } | 
| 203 } | 203 } | 
| 204 | 204 | 
| 205 // Code range is full or too fragmented. | 205 // Code range is full or too fragmented. | 
| 206 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock"); | 206 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock"); | 
| 207 } | 207 } | 
| 208 | 208 | 
| 209 | 209 | 
| 210 | |
| 211 Address CodeRange::AllocateRawMemory(const size_t requested, | 210 Address CodeRange::AllocateRawMemory(const size_t requested, | 
| 212 size_t* allocated) { | 211 size_t* allocated, | 
| 212 bool committed) { | |
danno 2012/12/20 10:18:35
nit: s/committed/commit
| 213 ASSERT(current_allocation_block_index_ < allocation_list_.length()); | 213 ASSERT(current_allocation_block_index_ < allocation_list_.length()); | 
| 214 if (requested > allocation_list_[current_allocation_block_index_].size) { | 214 if (requested > allocation_list_[current_allocation_block_index_].size) { | 
| 215 // Find an allocation block large enough. This function call may | 215 // Find an allocation block large enough. This function call may | 
| 216 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block. | 216 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block. | 
| 217 GetNextAllocationBlock(requested); | 217 GetNextAllocationBlock(requested); | 
| 218 } | 218 } | 
| 219 // Commit the requested memory at the start of the current allocation block. | 219 // Commit the requested memory at the start of the current allocation block. | 
| 220 size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment); | 220 size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment); | 
| 221 FreeBlock current = allocation_list_[current_allocation_block_index_]; | 221 FreeBlock current = allocation_list_[current_allocation_block_index_]; | 
| 222 if (aligned_requested >= (current.size - Page::kPageSize)) { | 222 if (aligned_requested >= (current.size - Page::kPageSize)) { | 
| 223 // Don't leave a small free block, useless for a large object or chunk. | 223 // Don't leave a small free block, useless for a large object or chunk. | 
| 224 *allocated = current.size; | 224 *allocated = current.size; | 
| 225 } else { | 225 } else { | 
| 226 *allocated = aligned_requested; | 226 *allocated = aligned_requested; | 
| 227 } | 227 } | 
| 228 ASSERT(*allocated <= current.size); | 228 ASSERT(*allocated <= current.size); | 
| 229 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment)); | 229 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment)); | 
| 230 if (!MemoryAllocator::CommitCodePage(code_range_, | 230 if (committed && !CommitRawMemory(current.start, *allocated)) { | 
| 231 current.start, | |
| 232 *allocated)) { | |
| 233 *allocated = 0; | 231 *allocated = 0; | 
| 234 return NULL; | 232 return NULL; | 
| 235 } | 233 } | 
| 236 allocation_list_[current_allocation_block_index_].start += *allocated; | 234 allocation_list_[current_allocation_block_index_].start += *allocated; | 
| 237 allocation_list_[current_allocation_block_index_].size -= *allocated; | 235 allocation_list_[current_allocation_block_index_].size -= *allocated; | 
| 238 if (*allocated == current.size) { | 236 if (*allocated == current.size) { | 
| 239 GetNextAllocationBlock(0); // This block is used up, get the next one. | 237 GetNextAllocationBlock(0); // This block is used up, get the next one. | 
| 240 } | 238 } | 
| 241 return current.start; | 239 return current.start; | 
| 242 } | 240 } | 
| 243 | 241 | 
| 244 | 242 | 
| 243 bool CodeRange::CommitRawMemory(Address start, size_t size) { | |
| 244 return MemoryAllocator::CommitCodePage(code_range_, start, size); | |
| 245 } | |
| 246 | |
| 247 | |
| 248 Address CodeRange::ReserveChunk(size_t body_size, size_t *reserved) { | |
| 249 size_t chunk_size = RoundUp(MemoryAllocator::CodePageAreaStartOffset() + | |
| 250 body_size, OS::CommitPageSize()) + | |
| 251 MemoryAllocator::CodePageGuardSize(); | |
| 252 Address base = AllocateRawMemory(chunk_size, reserved, false); | |
| 253 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment)); | |
| 254 return base; | |
| 255 } | |
| 256 | |
| 257 | |
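The new CommitRawMemory/ReserveChunk pair splits executable-chunk allocation into a reserve step and a commit step: ReserveChunk calls AllocateRawMemory with the commit flag set to false, so an aligned range is carved out of the code range without committing any pages yet. A minimal usage sketch under that assumption follows; the surrounding caller is hypothetical, and only ReserveChunk, CommitRawMemory, and CommitChunkInCodeRange (added further down in this CL) come from the patch.

```cpp
// Hypothetical caller, e.g. when setting up deoptimization entry code.
// Step 1: reserve address space inside the code range without committing it.
size_t reserved_size = 0;
Address base = isolate->code_range()->ReserveChunk(body_size, &reserved_size);

// Step 2 (later): commit the reserved range and wrap it in a MemoryChunk.
// CommitChunkInCodeRange commits via CodeRange::CommitRawMemory, which in
// turn uses MemoryAllocator::CommitCodePage.
MemoryChunk* chunk = isolate->memory_allocator()->CommitChunkInCodeRange(
    base, body_size, reserved_size);
```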
| 245 void CodeRange::FreeRawMemory(Address address, size_t length) { | 258 void CodeRange::FreeRawMemory(Address address, size_t length) { | 
| 246 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment)); | 259 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment)); | 
| 247 free_list_.Add(FreeBlock(address, length)); | 260 free_list_.Add(FreeBlock(address, length)); | 
| 248 code_range_->Uncommit(address, length); | 261 code_range_->Uncommit(address, length); | 
| 249 } | 262 } | 
| 250 | 263 | 
| 251 | 264 | 
| 252 void CodeRange::TearDown() { | 265 void CodeRange::TearDown() { | 
| 253 delete code_range_; // Frees all memory in the virtual memory range. | 266 delete code_range_; // Frees all memory in the virtual memory range. | 
| 254 code_range_ = NULL; | 267 code_range_ = NULL; | 
| (...skipping 22 matching lines...) | |
| 277 | 290 | 
| 278 size_ = 0; | 291 size_ = 0; | 
| 279 size_executable_ = 0; | 292 size_executable_ = 0; | 
| 280 | 293 | 
| 281 return true; | 294 return true; | 
| 282 } | 295 } | 
| 283 | 296 | 
| 284 | 297 | 
| 285 void MemoryAllocator::TearDown() { | 298 void MemoryAllocator::TearDown() { | 
| 286 // Check that spaces were torn down before MemoryAllocator. | 299 // Check that spaces were torn down before MemoryAllocator. | 
| 287 ASSERT(size_ == 0); | 300 // ASSERT(size_ == 0); | 
danno 2012/12/20 10:18:35
Why this change?

haitao.feng 2012/12/24 14:46:46
"make x64.debug.check" has some errors. When I deb

danno 2012/12/28 11:58:37
The point of this comment is that pages allocated
| 288 // TODO(gc) this will be true again when we fix FreeMemory. | 301 // TODO(gc) this will be true again when we fix FreeMemory. | 
| 289 // ASSERT(size_executable_ == 0); | 302 // ASSERT(size_executable_ == 0); | 
| 290 capacity_ = 0; | 303 capacity_ = 0; | 
| 291 capacity_executable_ = 0; | 304 capacity_executable_ = 0; | 
| 292 } | 305 } | 
| 293 | 306 | 
| 294 | 307 | 
| 295 void MemoryAllocator::FreeMemory(VirtualMemory* reservation, | 308 void MemoryAllocator::FreeMemory(VirtualMemory* reservation, | 
| 296 Executability executable) { | 309 Executability executable) { | 
| 297 // TODO(gc) make code_range part of memory allocator? | 310 // TODO(gc) make code_range part of memory allocator? | 
| (...skipping 185 matching lines...) | |
| 483 heap_->decrement_scan_on_scavenge_pages(); | 496 heap_->decrement_scan_on_scavenge_pages(); | 
| 484 ClearFlag(SCAN_ON_SCAVENGE); | 497 ClearFlag(SCAN_ON_SCAVENGE); | 
| 485 } | 498 } | 
| 486 next_chunk_->prev_chunk_ = prev_chunk_; | 499 next_chunk_->prev_chunk_ = prev_chunk_; | 
| 487 prev_chunk_->next_chunk_ = next_chunk_; | 500 prev_chunk_->next_chunk_ = next_chunk_; | 
| 488 prev_chunk_ = NULL; | 501 prev_chunk_ = NULL; | 
| 489 next_chunk_ = NULL; | 502 next_chunk_ = NULL; | 
| 490 } | 503 } | 
| 491 | 504 | 
| 492 | 505 | 
| 506 MemoryChunk* MemoryAllocator::CommitChunkInCodeRange(Address start, | |
danno 2012/12/20 10:18:35
Yikes! This is copy-pasted from MemoryAllocator::A
| 507 size_t body_size, | |
| 508 size_t reserved_size) { | |
| 509 CodeRange* code_range = isolate_->code_range(); | |
| 510 ASSERT(code_range->exists() && code_range->contains(start)); | |
| 511 Address area_start = NULL; | |
| 512 Address area_end = NULL; | |
| 513 | |
| 514 if (size_executable_ + reserved_size > capacity_executable_) { | |
| 515 LOG(isolate_, | |
| 516 StringEvent("MemoryAllocator::AllocateRawMemory", | |
| 517 "V8 Executable Allocation capacity exceeded")); | |
| 518 return NULL; | |
| 519 } | |
| 520 | |
| 521 if (code_range->CommitRawMemory(start, reserved_size)) { | |
| 522 size_ += reserved_size; | |
| 523 size_executable_ += reserved_size; | |
| 524 if (Heap::ShouldZapGarbage()) { | |
| 525 ZapBlock(start, CodePageGuardStartOffset()); | |
| 526 ZapBlock(start + CodePageAreaStartOffset(), body_size); | |
| 527 } | |
| 528 | |
| 529 area_start = start + CodePageAreaStartOffset(); | |
| 530 area_end = area_start + body_size; | |
| 531 isolate_->counters()->memory_allocated()-> | |
| 532 Increment(static_cast<int>(reserved_size)); | |
| 533 | |
| 534 LOG(isolate_, NewEvent("MemoryChunk", start, reserved_size)); | |
| 535 | |
| 536 MemoryChunk* result = MemoryChunk::Initialize(isolate_->heap(), | |
| 537 start, | |
| 538 reserved_size, | |
| 539 area_start, | |
| 540 area_end, | |
| 541 EXECUTABLE, | |
| 542 NULL); | |
| 543 return result; | |
| 544 } else { | |
| 545 return NULL; | |
| 546 } | |
| 547 } | |
| 548 | |
| 549 | |
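danno's comment above flags this function as copy-pasted: most of its body repeats the executable-chunk bookkeeping that also appears in MemoryAllocator::AllocateChunk below. A hedged refactoring sketch, assuming a hypothetical private helper named InitializeExecutableChunk (the name and factoring are illustrative, not part of the patch), shows how the shared logic could live in one place once the memory has been committed:

```cpp
// Hypothetical helper: both CommitChunkInCodeRange and the executable path
// of AllocateChunk could delegate here after committing the pages.
MemoryChunk* MemoryAllocator::InitializeExecutableChunk(Address base,
                                                        size_t body_size,
                                                        size_t reserved_size) {
  size_ += reserved_size;             // overall allocation accounting
  size_executable_ += reserved_size;  // executable-space accounting
  if (Heap::ShouldZapGarbage()) {
    ZapBlock(base, CodePageGuardStartOffset());
    ZapBlock(base + CodePageAreaStartOffset(), body_size);
  }
  Address area_start = base + CodePageAreaStartOffset();
  Address area_end = area_start + body_size;
  isolate_->counters()->memory_allocated()->
      Increment(static_cast<int>(reserved_size));
  LOG(isolate_, NewEvent("MemoryChunk", base, reserved_size));
  return MemoryChunk::Initialize(isolate_->heap(), base, reserved_size,
                                 area_start, area_end, EXECUTABLE, NULL);
}
```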
| 493 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, | 550 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, | 
| 494 Executability executable, | 551 Executability executable, | 
| 495 Space* owner) { | 552 Space* owner) { | 
| 496 size_t chunk_size; | 553 size_t chunk_size; | 
| 497 Heap* heap = isolate_->heap(); | 554 Heap* heap = isolate_->heap(); | 
| 498 Address base = NULL; | 555 Address base = NULL; | 
| 499 VirtualMemory reservation; | 556 VirtualMemory reservation; | 
| 500 Address area_start = NULL; | 557 Address area_start = NULL; | 
| 501 Address area_end = NULL; | 558 Address area_end = NULL; | 
| 502 | 559 | 
| (...skipping 2482 matching lines...) | |
| 2985 object->ShortPrint(); | 3042 object->ShortPrint(); | 
| 2986 PrintF("\n"); | 3043 PrintF("\n"); | 
| 2987 } | 3044 } | 
| 2988 printf(" --------------------------------------\n"); | 3045 printf(" --------------------------------------\n"); | 
| 2989 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3046 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 
| 2990 } | 3047 } | 
| 2991 | 3048 | 
| 2992 #endif // DEBUG | 3049 #endif // DEBUG | 
| 2993 | 3050 | 
| 2994 } } // namespace v8::internal | 3051 } } // namespace v8::internal | 