Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 189 matching lines...) | |
| 200 if (requested <= allocation_list_[current_allocation_block_index_].size) { | 200 if (requested <= allocation_list_[current_allocation_block_index_].size) { |
| 201 return; // Found a large enough allocation block. | 201 return; // Found a large enough allocation block. |
| 202 } | 202 } |
| 203 } | 203 } |
| 204 | 204 |
| 205 // Code range is full or too fragmented. | 205 // Code range is full or too fragmented. |
| 206 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock"); | 206 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock"); |
| 207 } | 207 } |
| 208 | 208 |
| 209 | 209 |
| 210 | 210 Address CodeRange::AllocateRawMemory(const size_t requested_size, |
| 211 Address CodeRange::AllocateRawMemory(const size_t requested, | 211 const size_t commit_size, |
| 212 size_t* allocated) { | 212 size_t* allocated) { |
| | 213 ASSERT(commit_size <= requested_size); |
| 213 ASSERT(current_allocation_block_index_ < allocation_list_.length()); | 214 ASSERT(current_allocation_block_index_ < allocation_list_.length()); |
| 214 if (requested > allocation_list_[current_allocation_block_index_].size) { | 215 if (requested_size > allocation_list_[current_allocation_block_index_].size) { |
| 215 // Find an allocation block large enough. This function call may | 216 // Find an allocation block large enough. This function call may |
| 216 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block. | 217 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block. |
| 217 GetNextAllocationBlock(requested); | 218 GetNextAllocationBlock(requested_size); |
| 218 } | 219 } |
| 219 // Commit the requested memory at the start of the current allocation block. | 220 // Commit the requested memory at the start of the current allocation block. |
| 220 size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment); | 221 size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment); |
| 221 FreeBlock current = allocation_list_[current_allocation_block_index_]; | 222 FreeBlock current = allocation_list_[current_allocation_block_index_]; |
| 222 if (aligned_requested >= (current.size - Page::kPageSize)) { | 223 if (aligned_requested >= (current.size - Page::kPageSize)) { |
| 223 // Don't leave a small free block, useless for a large object or chunk. | 224 // Don't leave a small free block, useless for a large object or chunk. |
| 224 *allocated = current.size; | 225 *allocated = current.size; |
| 225 } else { | 226 } else { |
| 226 *allocated = aligned_requested; | 227 *allocated = aligned_requested; |
| 227 } | 228 } |
| 228 ASSERT(*allocated <= current.size); | 229 ASSERT(*allocated <= current.size); |
| 229 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment)); | 230 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment)); |
| 230 if (!MemoryAllocator::CommitCodePage(code_range_, | 231 if (!MemoryAllocator::CommitExecutableMemory(code_range_, |
| 231 current.start, | 232 current.start, |
| 232 *allocated)) { | 233 commit_size, |
| | 234 *allocated)) { |
| 233 *allocated = 0; | 235 *allocated = 0; |
| 234 return NULL; | 236 return NULL; |
| 235 } | 237 } |
| 236 allocation_list_[current_allocation_block_index_].start += *allocated; | 238 allocation_list_[current_allocation_block_index_].start += *allocated; |
| 237 allocation_list_[current_allocation_block_index_].size -= *allocated; | 239 allocation_list_[current_allocation_block_index_].size -= *allocated; |
| 238 if (*allocated == current.size) { | 240 if (*allocated == current.size) { |
| 239 GetNextAllocationBlock(0); // This block is used up, get the next one. | 241 GetNextAllocationBlock(0); // This block is used up, get the next one. |
| 240 } | 242 } |
| 241 return current.start; | 243 return current.start; |
| 242 } | 244 } |
| 243 | 245 |
| 244 | 246 |
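The block-selection logic above rounds the request up to the chunk alignment and, if the leftover of the current free block would be smaller than a page, hands out the whole block instead of leaving a useless sliver. Below is a self-contained toy calculation of that heuristic; the page size and alignment values are assumptions for the example, not V8's actual constants.

```cpp
// Toy illustration of the "don't leave a small free block" heuristic.
// The constants are assumptions for this example, not V8's real values.
#include <cstddef>
#include <cstdio>

static size_t RoundUp(size_t x, size_t a) { return (x + a - 1) / a * a; }

int main() {
  const size_t kPageSize  = 1 * 1024 * 1024;       // assumed page size
  const size_t kAlignment = 1 * 1024 * 1024;       // assumed chunk alignment

  size_t current_block_size = 5 * kPageSize;       // free block of 5 pages
  size_t requested          = 4 * kPageSize + 100; // a bit over 4 pages

  size_t aligned_requested = RoundUp(requested, kAlignment);  // rounds to 5 pages
  size_t allocated;
  if (aligned_requested >= current_block_size - kPageSize) {
    // Leftover would be smaller than one page: hand out the whole block.
    allocated = current_block_size;
  } else {
    allocated = aligned_requested;
  }
  printf("allocated %zu of %zu bytes\n", allocated, current_block_size);
  return 0;
}
```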
| | 247 bool CodeRange::CommitRawMemory(Address start, size_t length) { |
| | 248 // Commit page body (executable). |
| | 249 return code_range_->Commit(start, length, true); |
| | 250 } |
| | 251 |
| | 252 |
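Taken together, the AllocateRawMemory/CommitRawMemory split means a caller reserves the full chunk out of the code range but only commits part of it up front, committing more later as the body grows. The snippet below is a minimal POSIX illustration of that reserve-then-commit pattern, assuming mmap/mprotect; it is not V8 code and does not use the VirtualMemory API.

```cpp
// Illustrative sketch of reserve-then-commit (POSIX only, not V8 code).
#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t reserved_size = 1 << 20;  // reserve 1 MB of address space
  const size_t commit_size   = 1 << 16;  // back only 64 KB with accessible pages

  // Reserve: address space only, no access rights yet (analogous to the
  // initial VirtualMemory reservation).
  void* base = mmap(NULL, reserved_size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return 1;

  // Commit: make the first commit_size bytes usable (analogous to Commit()).
  if (mprotect(base, commit_size, PROT_READ | PROT_WRITE) != 0) return 1;

  printf("reserved %zu bytes at %p, committed %zu\n",
         reserved_size, base, commit_size);

  // Later, more of the region can be committed incrementally, which is what
  // CodeRange::CommitRawMemory enables for the executable part of a chunk.
  munmap(base, reserved_size);
  return 0;
}
```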
| 245 void CodeRange::FreeRawMemory(Address address, size_t length) { | 253 void CodeRange::FreeRawMemory(Address address, size_t length) { |
| 246 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment)); | 254 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment)); |
| 247 free_list_.Add(FreeBlock(address, length)); | 255 free_list_.Add(FreeBlock(address, length)); |
| 248 code_range_->Uncommit(address, length); | 256 code_range_->Uncommit(address, length); |
| 249 } | 257 } |
| 250 | 258 |
| 251 | 259 |
| 252 void CodeRange::TearDown() { | 260 void CodeRange::TearDown() { |
| 253 delete code_range_; // Frees all memory in the virtual memory range. | 261 delete code_range_; // Frees all memory in the virtual memory range. |
| 254 code_range_ = NULL; | 262 code_range_ = NULL; |
| (...skipping 84 matching lines...) | |
| 339 } | 347 } |
| 340 | 348 |
| 341 | 349 |
| 342 Address MemoryAllocator::ReserveAlignedMemory(size_t size, | 350 Address MemoryAllocator::ReserveAlignedMemory(size_t size, |
| 343 size_t alignment, | 351 size_t alignment, |
| 344 VirtualMemory* controller) { | 352 VirtualMemory* controller) { |
| 345 VirtualMemory reservation(size, alignment); | 353 VirtualMemory reservation(size, alignment); |
| 346 | 354 |
| 347 if (!reservation.IsReserved()) return NULL; | 355 if (!reservation.IsReserved()) return NULL; |
| 348 size_ += reservation.size(); | 356 size_ += reservation.size(); |
| 349 Address base = RoundUp(static_cast<Address>(reservation.address()), | 357 Address base = static_cast<Address>(reservation.address()); |
| 350 alignment); | |
| 351 controller->TakeControl(&reservation); | 358 controller->TakeControl(&reservation); |
| 352 return base; | 359 return base; |
| 353 } | 360 } |
| 354 | 361 |
| 355 | 362 |
| 356 Address MemoryAllocator::AllocateAlignedMemory(size_t size, | 363 Address MemoryAllocator::AllocateAlignedMemory(size_t requested_size, |
danno 2013/01/17 16:00:55: s/requested_size/reserve_size/
haitao.feng 2013/01/18 12:59:26: Done.
| | 364 size_t commit_size, |
| 357 size_t alignment, | 365 size_t alignment, |
| 358 Executability executable, | 366 Executability executable, |
| 359 VirtualMemory* controller) { | 367 VirtualMemory* controller) { |
| | 368 ASSERT(commit_size <= requested_size); |
| 360 VirtualMemory reservation; | 369 VirtualMemory reservation; |
| 361 Address base = ReserveAlignedMemory(size, alignment, &reservation); | 370 Address base = ReserveAlignedMemory(requested_size, alignment, &reservation); |
| 362 if (base == NULL) return NULL; | 371 if (base == NULL) return NULL; |
| 363 | 372 |
| 364 if (executable == EXECUTABLE) { | 373 if (executable == EXECUTABLE) { |
| 365 if (!CommitCodePage(&reservation, base, size)) { | 374 if (!CommitExecutableMemory(&reservation, |
| | 375 base, |
| | 376 commit_size, |
| | 377 requested_size)) { |
| 366 base = NULL; | 378 base = NULL; |
| 367 } | 379 } |
| 368 } else { | 380 } else { |
| 369 if (!reservation.Commit(base, size, false)) { | 381 if (!reservation.Commit(base, commit_size, false)) { |
| 370 base = NULL; | 382 base = NULL; |
| 371 } | 383 } |
| 372 } | 384 } |
| 373 | 385 |
| 374 if (base == NULL) { | 386 if (base == NULL) { |
| 375 // Failed to commit the body. Release the mapping and any partially | 387 // Failed to commit the body. Release the mapping and any partially |
| 376 // commited regions inside it. | 388 // commited regions inside it. |
| 377 reservation.Release(); | 389 reservation.Release(); |
| 378 return NULL; | 390 return NULL; |
| 379 } | 391 } |
| (...skipping 83 matching lines...) | |
| 463 } | 475 } |
| 464 | 476 |
| 465 if (owner == heap->old_data_space()) { | 477 if (owner == heap->old_data_space()) { |
| 466 chunk->SetFlag(CONTAINS_ONLY_DATA); | 478 chunk->SetFlag(CONTAINS_ONLY_DATA); |
| 467 } | 479 } |
| 468 | 480 |
| 469 return chunk; | 481 return chunk; |
| 470 } | 482 } |
| 471 | 483 |
| 472 | 484 |
| | 485 bool MemoryChunk::CommitBody(size_t body_size, Executability executable) { |
danno 2013/01/17 16:00:55: I don't think that CommitBody should take the exec
| | 486 ASSERT(body_size <= size_ - (area_start_ - address()) - |
| | 487 (executable == EXECUTABLE ? MemoryAllocator::CodePageGuardSize() : 0)); |
| | 488 |
| | 489 // Already committed, no uncommitment. |
| | 490 if (body_size <= (area_end_ - area_start_)) return true; |
danno 2013/01/17 16:00:55: I think you should actually call UnCommit if the b
haitao.feng 2013/01/18 12:59:26: Done.
| | 491 |
| | 492 size_t length = body_size - (area_end_ - area_start_); |
| | 493 if (reservation_.IsReserved()) { |
| | 494 if (!reservation_.Commit(area_end_, length, executable == EXECUTABLE)) { |
| | 495 return false; |
| | 496 } |
| | 497 } else { |
| | 498 CodeRange* code_range = heap_->isolate()->code_range(); |
| | 499 ASSERT(code_range->exists() && (executable == EXECUTABLE)); |
| | 500 if (!code_range->CommitRawMemory(area_end_, length)) return false; |
| | 501 } |
| | 502 |
| | 503 if (Heap::ShouldZapGarbage()) { |
| | 504 heap_->isolate()->memory_allocator()->ZapBlock(area_end_, length); |
| | 505 } |
| | 506 |
| | 507 area_end_ = area_start_ + body_size; |
| | 508 |
| | 509 return true; |
| | 510 } |
| | 511 |
| | 512 |
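MemoryChunk::CommitBody grows the committed part of a chunk's body on demand, and per the review exchange above the final version is also expected to give memory back when the body shrinks. The sketch below is a simplified, self-contained illustration of that grow-or-shrink logic; ToyChunk and its Commit/Uncommit stand-ins are hypothetical and only mirror the shape of VirtualMemory::Commit and VirtualMemory::Uncommit, they do not touch real page permissions.

```cpp
// Simplified sketch of the grow-or-shrink body-commit logic discussed above.
// Not V8's implementation: Commit/Uncommit here only record what would be done.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct ToyChunk {
  uint8_t* area_start;   // first usable byte of the body
  uint8_t* area_end;     // one past the last committed body byte
  uint8_t* reserve_end;  // one past the last reserved body byte

  bool Commit(uint8_t* start, size_t length) {
    printf("commit   %zu bytes at offset %td\n", length, start - area_start);
    return true;
  }
  bool Uncommit(uint8_t* start, size_t length) {
    printf("uncommit %zu bytes at offset %td\n", length, start - area_start);
    return true;
  }

  // Grow or shrink the committed body to exactly body_size bytes.
  bool CommitBody(size_t body_size) {
    assert(area_start + body_size <= reserve_end);
    size_t committed = static_cast<size_t>(area_end - area_start);
    if (body_size > committed) {
      if (!Commit(area_end, body_size - committed)) return false;
    } else if (body_size < committed) {
      // The reviewer's suggestion: release the now-unused tail.
      if (!Uncommit(area_start + body_size, committed - body_size)) return false;
    }
    area_end = area_start + body_size;
    return true;
  }
};

int main() {
  static uint8_t backing[4096];
  ToyChunk chunk = { backing, backing, backing + sizeof(backing) };
  chunk.CommitBody(1024);  // grow: commits 1024 bytes
  chunk.CommitBody(256);   // shrink: uncommits the trailing 768 bytes
  return 0;
}
```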
| 473 void MemoryChunk::InsertAfter(MemoryChunk* other) { | 513 void MemoryChunk::InsertAfter(MemoryChunk* other) { |
| 474 next_chunk_ = other->next_chunk_; | 514 next_chunk_ = other->next_chunk_; |
| 475 prev_chunk_ = other; | 515 prev_chunk_ = other; |
| 476 other->next_chunk_->prev_chunk_ = this; | 516 other->next_chunk_->prev_chunk_ = this; |
| 477 other->next_chunk_ = this; | 517 other->next_chunk_ = this; |
| 478 } | 518 } |
| 479 | 519 |
| 480 | 520 |
| 481 void MemoryChunk::Unlink() { | 521 void MemoryChunk::Unlink() { |
| 482 if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) { | 522 if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) { |
| 483 heap_->decrement_scan_on_scavenge_pages(); | 523 heap_->decrement_scan_on_scavenge_pages(); |
| 484 ClearFlag(SCAN_ON_SCAVENGE); | 524 ClearFlag(SCAN_ON_SCAVENGE); |
| 485 } | 525 } |
| 486 next_chunk_->prev_chunk_ = prev_chunk_; | 526 next_chunk_->prev_chunk_ = prev_chunk_; |
| 487 prev_chunk_->next_chunk_ = next_chunk_; | 527 prev_chunk_->next_chunk_ = next_chunk_; |
| 488 prev_chunk_ = NULL; | 528 prev_chunk_ = NULL; |
| 489 next_chunk_ = NULL; | 529 next_chunk_ = NULL; |
| 490 } | 530 } |
| 491 | 531 |
| 492 | 532 |
| 493 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, | 533 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_body_size, |
| | 534 intptr_t commit_body_size, |
| 494 Executability executable, | 535 Executability executable, |
| 495 Space* owner) { | 536 Space* owner) { |
| | 537 ASSERT(commit_body_size <= reserve_body_size); |
| | 538 |
| 496 size_t chunk_size; | 539 size_t chunk_size; |
| 497 Heap* heap = isolate_->heap(); | 540 Heap* heap = isolate_->heap(); |
| 498 Address base = NULL; | 541 Address base = NULL; |
| 499 VirtualMemory reservation; | 542 VirtualMemory reservation; |
| 500 Address area_start = NULL; | 543 Address area_start = NULL; |
| 501 Address area_end = NULL; | 544 Address area_end = NULL; |
| 502 | 545 |
| 503 if (executable == EXECUTABLE) { | 546 if (executable == EXECUTABLE) { |
| 504 chunk_size = RoundUp(CodePageAreaStartOffset() + body_size, | 547 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_body_size, |
| 505 OS::CommitPageSize()) + CodePageGuardSize(); | 548 OS::CommitPageSize()) + CodePageGuardSize(); |
| 506 | 549 |
| 507 // Check executable memory limit. | 550 // Check executable memory limit. |
| 508 if (size_executable_ + chunk_size > capacity_executable_) { | 551 if (size_executable_ + chunk_size > capacity_executable_) { |
| 509 LOG(isolate_, | 552 LOG(isolate_, |
| 510 StringEvent("MemoryAllocator::AllocateRawMemory", | 553 StringEvent("MemoryAllocator::AllocateRawMemory", |
| 511 "V8 Executable Allocation capacity exceeded")); | 554 "V8 Executable Allocation capacity exceeded")); |
| 512 return NULL; | 555 return NULL; |
| 513 } | 556 } |
| 514 | 557 |
| 515 // Allocate executable memory either from code range or from the | 558 // Allocate executable memory either from code range or from the |
| 516 // OS. | 559 // OS. |
| 517 if (isolate_->code_range()->exists()) { | 560 if (isolate_->code_range()->exists()) { |
| 518 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); | 561 base = isolate_->code_range()->AllocateRawMemory(chunk_size, |
| | 562 commit_body_size, |
| | 563 &chunk_size); |
| 519 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), | 564 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), |
| 520 MemoryChunk::kAlignment)); | 565 MemoryChunk::kAlignment)); |
| 521 if (base == NULL) return NULL; | 566 if (base == NULL) return NULL; |
| 522 size_ += chunk_size; | 567 size_ += chunk_size; |
| 523 // Update executable memory size. | 568 // Update executable memory size. |
| 524 size_executable_ += chunk_size; | 569 size_executable_ += chunk_size; |
| 525 } else { | 570 } else { |
| 526 base = AllocateAlignedMemory(chunk_size, | 571 base = AllocateAlignedMemory(chunk_size, |
| | 572 commit_body_size, |
| 527 MemoryChunk::kAlignment, | 573 MemoryChunk::kAlignment, |
| 528 executable, | 574 executable, |
| 529 &reservation); | 575 &reservation); |
| 530 if (base == NULL) return NULL; | 576 if (base == NULL) return NULL; |
| 531 // Update executable memory size. | 577 // Update executable memory size. |
| 532 size_executable_ += reservation.size(); | 578 size_executable_ += reservation.size(); |
| 533 } | 579 } |
| 534 | 580 |
| 535 if (Heap::ShouldZapGarbage()) { | 581 if (Heap::ShouldZapGarbage()) { |
| 536 ZapBlock(base, CodePageGuardStartOffset()); | 582 ZapBlock(base, CodePageGuardStartOffset()); |
| 537 ZapBlock(base + CodePageAreaStartOffset(), body_size); | 583 ZapBlock(base + CodePageAreaStartOffset(), commit_body_size); |
| 538 } | 584 } |
| 539 | 585 |
| 540 area_start = base + CodePageAreaStartOffset(); | 586 area_start = base + CodePageAreaStartOffset(); |
| 541 area_end = area_start + body_size; | 587 area_end = area_start + commit_body_size; |
| 542 } else { | 588 } else { |
| 543 chunk_size = MemoryChunk::kObjectStartOffset + body_size; | 589 chunk_size = MemoryChunk::kObjectStartOffset + reserve_body_size; |
| | 590 size_t commit_size = MemoryChunk::kObjectStartOffset + commit_body_size; |
| 544 base = AllocateAlignedMemory(chunk_size, | 591 base = AllocateAlignedMemory(chunk_size, |
| | 592 commit_size, |
| 545 MemoryChunk::kAlignment, | 593 MemoryChunk::kAlignment, |
| 546 executable, | 594 executable, |
| 547 &reservation); | 595 &reservation); |
| 548 | 596 |
| 549 if (base == NULL) return NULL; | 597 if (base == NULL) return NULL; |
| 550 | 598 |
| 551 if (Heap::ShouldZapGarbage()) { | 599 if (Heap::ShouldZapGarbage()) { |
| 552 ZapBlock(base, chunk_size); | 600 ZapBlock(base, MemoryChunk::kObjectStartOffset + commit_body_size); |
| 553 } | 601 } |
| 554 | 602 |
| 555 area_start = base + Page::kObjectStartOffset; | 603 area_start = base + Page::kObjectStartOffset; |
| 556 area_end = base + chunk_size; | 604 area_end = area_start + commit_body_size; |
| 557 } | 605 } |
| 558 | 606 |
| | 607 // Use chunk_size for statistics and callbacks because we assume that they |
| | 608 // treat reserved but not-yet committed memory regions of chunks as allocated. |
| 559 isolate_->counters()->memory_allocated()-> | 609 isolate_->counters()->memory_allocated()-> |
| 560 Increment(static_cast<int>(chunk_size)); | 610 Increment(static_cast<int>(chunk_size)); |
| 561 | 611 |
| 562 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); | 612 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); |
| 563 if (owner != NULL) { | 613 if (owner != NULL) { |
| 564 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); | 614 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); |
| 565 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); | 615 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); |
| 566 } | 616 } |
| 567 | 617 |
| 568 MemoryChunk* result = MemoryChunk::Initialize(heap, | 618 MemoryChunk* result = MemoryChunk::Initialize(heap, |
| 569 base, | 619 base, |
| 570 chunk_size, | 620 chunk_size, |
| 571 area_start, | 621 area_start, |
| 572 area_end, | 622 area_end, |
| 573 executable, | 623 executable, |
| 574 owner); | 624 owner); |
| 575 result->set_reserved_memory(&reservation); | 625 result->set_reserved_memory(&reservation); |
| 576 return result; | 626 return result; |
| 577 } | 627 } |
| 578 | 628 |
| 579 | 629 |
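For executable chunks, AllocateChunk sizes the reservation as RoundUp(CodePageAreaStartOffset() + reserve_body_size, OS::CommitPageSize()) + CodePageGuardSize(), while only commit_body_size of the body is committed up front. Below is a worked example of that arithmetic; the page size and header/guard offsets are assumed values for illustration, not the real CodePage* constants.

```cpp
// Worked example of the executable chunk sizing, with assumed values:
// a 4 KB OS commit page, one header page and one leading guard page before
// the body, and one trailing guard page. The real offsets come from
// CodePageGuardStartOffset(), CodePageAreaStartOffset() and CodePageGuardSize().
#include <cstddef>
#include <cstdio>

static size_t RoundUp(size_t x, size_t a) { return (x + a - 1) / a * a; }

int main() {
  const size_t kCommitPageSize  = 4096;                 // assumed
  const size_t kGuardSize       = kCommitPageSize;      // one trailing guard page
  const size_t kAreaStartOffset = 2 * kCommitPageSize;  // header page + leading guard

  size_t reserve_body_size = 100 * 1024;  // caller asks for a 100 KB body
  size_t chunk_size =
      RoundUp(kAreaStartOffset + reserve_body_size, kCommitPageSize) + kGuardSize;
  printf("chunk_size = %zu bytes (%zu pages)\n",
         chunk_size, chunk_size / kCommitPageSize);
  return 0;
}
```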
| 580 Page* MemoryAllocator::AllocatePage(intptr_t size, | 630 Page* MemoryAllocator::AllocatePage(intptr_t size, |
| 581 PagedSpace* owner, | 631 PagedSpace* owner, |
| 582 Executability executable) { | 632 Executability executable) { |
| 583 MemoryChunk* chunk = AllocateChunk(size, executable, owner); | 633 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner); |
| 584 | 634 |
| 585 if (chunk == NULL) return NULL; | 635 if (chunk == NULL) return NULL; |
| 586 | 636 |
| 587 return Page::Initialize(isolate_->heap(), chunk, executable, owner); | 637 return Page::Initialize(isolate_->heap(), chunk, executable, owner); |
| 588 } | 638 } |
| 589 | 639 |
| 590 | 640 |
| 591 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, | 641 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, |
| 592 Space* owner, | 642 Space* owner, |
| 593 Executability executable) { | 643 Executability executable) { |
| 594 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner); | 644 MemoryChunk* chunk = AllocateChunk(object_size, |
| | 645 object_size, |
| | 646 executable, |
| | 647 owner); |
| 595 if (chunk == NULL) return NULL; | 648 if (chunk == NULL) return NULL; |
| 596 return LargePage::Initialize(isolate_->heap(), chunk); | 649 return LargePage::Initialize(isolate_->heap(), chunk); |
| 597 } | 650 } |
| 598 | 651 |
| 599 | 652 |
| 600 void MemoryAllocator::Free(MemoryChunk* chunk) { | 653 void MemoryAllocator::Free(MemoryChunk* chunk) { |
| 601 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); | 654 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); |
| 602 if (chunk->owner() != NULL) { | 655 if (chunk->owner() != NULL) { |
| 603 ObjectSpace space = | 656 ObjectSpace space = |
| 604 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); | 657 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); |
| (...skipping 121 matching lines...) | |
| 726 } | 779 } |
| 727 | 780 |
| 728 | 781 |
| 729 int MemoryAllocator::CodePageAreaEndOffset() { | 782 int MemoryAllocator::CodePageAreaEndOffset() { |
| 730 // We are guarding code pages: the last OS page will be protected as | 783 // We are guarding code pages: the last OS page will be protected as |
| 731 // non-writable. | 784 // non-writable. |
| 732 return Page::kPageSize - static_cast<int>(OS::CommitPageSize()); | 785 return Page::kPageSize - static_cast<int>(OS::CommitPageSize()); |
| 733 } | 786 } |
| 734 | 787 |
| 735 | 788 |
| 736 bool MemoryAllocator::CommitCodePage(VirtualMemory* vm, | 789 bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, |
| 737 Address start, | 790 Address start, |
| 738 size_t size) { | 791 size_t commit_size, |
| | 792 size_t reserved_size) { |
| 739 // Commit page header (not executable). | 793 // Commit page header (not executable). |
| 740 if (!vm->Commit(start, | 794 if (!vm->Commit(start, |
| 741 CodePageGuardStartOffset(), | 795 CodePageGuardStartOffset(), |
| 742 false)) { | 796 false)) { |
| 743 return false; | 797 return false; |
| 744 } | 798 } |
| 745 | 799 |
| 746 // Create guard page after the header. | 800 // Create guard page after the header. |
| 747 if (!vm->Guard(start + CodePageGuardStartOffset())) { | 801 if (!vm->Guard(start + CodePageGuardStartOffset())) { |
| 748 return false; | 802 return false; |
| 749 } | 803 } |
| 750 | 804 |
| 751 // Commit page body (executable). | 805 // Commit page body (executable). |
| 752 size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize(); | |
| 753 if (!vm->Commit(start + CodePageAreaStartOffset(), | 806 if (!vm->Commit(start + CodePageAreaStartOffset(), |
| 754 area_size, | 807 commit_size, |
| 755 true)) { | 808 true)) { |
| 756 return false; | 809 return false; |
| 757 } | 810 } |
| 758 | 811 |
| 759 // Create guard page after the allocatable area. | 812 // Create guard page before the end. |
| 760 if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) { | 813 if (!vm->Guard(start + reserved_size - CodePageGuardSize())) { |
| 761 return false; | 814 return false; |
| 762 } | 815 } |
| 763 | 816 |
| 764 return true; | 817 return true; |
| 765 } | 818 } |
| 766 | 819 |
| 767 | 820 |
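The revised CommitExecutableMemory commits the header and only commit_size bytes of the body, and now places the trailing guard page at the end of the reserved region (start + reserved_size - CodePageGuardSize()) rather than immediately after the committed body, leaving room for the body to grow later. The POSIX sketch below illustrates that layout under assumed offsets (one header page, one leading guard page); it is illustrative only, not V8's code, and mapping pages read-write-execute may be refused on hardened systems.

```cpp
// Illustrative POSIX sketch of the layout set up above:
// [ header | guard | committed body ... uncommitted tail | guard ]
// with the trailing guard at the end of the *reserved* region.
// Offsets are assumptions for the example, not V8's constants.
#include <sys/mman.h>
#include <cstddef>

bool CommitExecutableRegion(char* start, size_t commit_size,
                            size_t reserved_size, size_t page) {
  char* header     = start;                          // chunk header, read/write
  char* body       = start + 2 * page;               // after header + guard page
  char* tail_guard = start + reserved_size - page;   // last page of the reservation

  if (mprotect(header, page, PROT_READ | PROT_WRITE) != 0) return false;
  // The page between header and body stays PROT_NONE: the leading guard.
  if (mprotect(body, commit_size, PROT_READ | PROT_WRITE | PROT_EXEC) != 0) {
    return false;
  }
  // The trailing guard page also stays PROT_NONE; nothing to do for it.
  (void)tail_guard;
  return true;
}

int main() {
  const size_t page     = 4096;                      // assumed OS commit page size
  const size_t reserved = 16 * page;
  char* start = static_cast<char*>(mmap(NULL, reserved, PROT_NONE,
                                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (start == MAP_FAILED) return 1;
  bool ok = CommitExecutableRegion(start, 4 * page, reserved, page);
  munmap(start, reserved);
  return ok ? 0 : 1;
}
```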
| 768 // ----------------------------------------------------------------------------- | 821 // ----------------------------------------------------------------------------- |
| 769 // MemoryChunk implementation | 822 // MemoryChunk implementation |
| 770 | 823 |
| (...skipping 2215 matching lines...) | |
| 2986 object->ShortPrint(); | 3039 object->ShortPrint(); |
| 2987 PrintF("\n"); | 3040 PrintF("\n"); |
| 2988 } | 3041 } |
| 2989 printf(" --------------------------------------\n"); | 3042 printf(" --------------------------------------\n"); |
| 2990 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3043 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 2991 } | 3044 } |
| 2992 | 3045 |
| 2993 #endif // DEBUG | 3046 #endif // DEBUG |
| 2994 | 3047 |
| 2995 } } // namespace v8::internal | 3048 } } // namespace v8::internal |