Chromium Code Reviews

Unified diff: src/spaces.cc

Issue 11566011: Use MemoryChunk-based allocation for deoptimization entry code (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 7 years, 11 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 188 matching lines...)
     if (requested <= allocation_list_[current_allocation_block_index_].size) {
       return;  // Found a large enough allocation block.
     }
   }

   // Code range is full or too fragmented.
   V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
 }


-Address CodeRange::AllocateRawMemory(const size_t requested,
-                                     size_t* allocated) {
+Address CodeRange::AllocateRawMemory(const size_t requested_size,
+                                     const size_t commit_size,
+                                     size_t* allocated) {
+  ASSERT(commit_size <= requested_size);
   ASSERT(current_allocation_block_index_ < allocation_list_.length());
-  if (requested > allocation_list_[current_allocation_block_index_].size) {
+  if (requested_size > allocation_list_[current_allocation_block_index_].size) {
     // Find an allocation block large enough. This function call may
     // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
-    GetNextAllocationBlock(requested);
+    GetNextAllocationBlock(requested_size);
   }
   // Commit the requested memory at the start of the current allocation block.
-  size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
+  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
   FreeBlock current = allocation_list_[current_allocation_block_index_];
   if (aligned_requested >= (current.size - Page::kPageSize)) {
     // Don't leave a small free block, useless for a large object or chunk.
     *allocated = current.size;
   } else {
     *allocated = aligned_requested;
   }
   ASSERT(*allocated <= current.size);
   ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!MemoryAllocator::CommitCodePage(code_range_,
-                                       current.start,
-                                       *allocated)) {
+  if (!MemoryAllocator::CommitExecutableMemory(code_range_,
+                                               current.start,
+                                               commit_size,
+                                               *allocated)) {
     *allocated = 0;
     return NULL;
   }
   allocation_list_[current_allocation_block_index_].start += *allocated;
   allocation_list_[current_allocation_block_index_].size -= *allocated;
   if (*allocated == current.size) {
     GetNextAllocationBlock(0);  // This block is used up, get the next one.
   }
   return current.start;
 }


+bool CodeRange::CommitRawMemory(Address start, size_t length) {
+  return code_range_->Commit(start, length, true);
+}
+
+
+bool CodeRange::UncommitRawMemory(Address start, size_t length) {
+  return code_range_->Uncommit(start, length);
+}
+
+
 void CodeRange::FreeRawMemory(Address address, size_t length) {
   ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
   free_list_.Add(FreeBlock(address, length));
   code_range_->Uncommit(address, length);
 }


 void CodeRange::TearDown() {
   delete code_range_;  // Frees all memory in the virtual memory range.
   code_range_ = NULL;
(...skipping 84 matching lines...)
 }


 Address MemoryAllocator::ReserveAlignedMemory(size_t size,
                                               size_t alignment,
                                               VirtualMemory* controller) {
   VirtualMemory reservation(size, alignment);

   if (!reservation.IsReserved()) return NULL;
   size_ += reservation.size();
-  Address base = RoundUp(static_cast<Address>(reservation.address()),
-                         alignment);
+  Address base = static_cast<Address>(reservation.address());
   controller->TakeControl(&reservation);
   return base;
 }


-Address MemoryAllocator::AllocateAlignedMemory(size_t size,
+Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
+                                               size_t commit_size,
                                                size_t alignment,
                                                Executability executable,
                                                VirtualMemory* controller) {
+  ASSERT(commit_size <= reserve_size);
   VirtualMemory reservation;
-  Address base = ReserveAlignedMemory(size, alignment, &reservation);
+  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
   if (base == NULL) return NULL;

   if (executable == EXECUTABLE) {
-    if (!CommitCodePage(&reservation, base, size)) {
+    if (!CommitExecutableMemory(&reservation,
+                                base,
+                                commit_size,
+                                reserve_size)) {
       base = NULL;
     }
   } else {
-    if (!reservation.Commit(base, size, false)) {
+    if (!reservation.Commit(base, commit_size, false)) {
       base = NULL;
     }
   }

   if (base == NULL) {
     // Failed to commit the body. Release the mapping and any partially
     // commited regions inside it.
     reservation.Release();
     return NULL;
   }
(...skipping 83 matching lines...)
   }

   if (owner == heap->old_data_space()) {
     chunk->SetFlag(CONTAINS_ONLY_DATA);
   }

   return chunk;
 }


+// Commit MemoryChunk area to the requested size.
+bool MemoryChunk::CommitArea(size_t requested) {
+  size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
+                      MemoryAllocator::CodePageGuardSize() : 0;
+  size_t header_size = area_start() - address() - guard_size;
+  size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
+  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
+                                  OS::CommitPageSize());
+
+  if (commit_size == committed_size) {
+    area_end_ = area_start_ + requested;
+    return true;
+  }
danno 2013/01/24 12:38:17: You can remove this block if you tweak the if below (see the sketch after this function).
haitao.feng 2013/01/24 13:27:14: Done.
+
+  if (commit_size > committed_size) {
+    // Commit size should be less or equal than the reserved size.
+    ASSERT(commit_size <= size() - 2 * guard_size);
+    // Append the committed area.
+    Address start = address() + committed_size + guard_size;
+    size_t length = commit_size - committed_size;
+    if (reservation_.IsReserved()) {
+      if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) {
+        return false;
+      }
+    } else {
+      CodeRange* code_range = heap_->isolate()->code_range();
+      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
+      if (!code_range->CommitRawMemory(start, length)) return false;
+    }
+
+    if (Heap::ShouldZapGarbage()) {
+      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
+    }
+  } else {
danno 2013/01/24 12:38:17: if you make this: else if (commit_size < committed_size) ...
haitao.feng 2013/01/24 13:27:14: Done.
+    ASSERT(commit_size > 0);
+    // Shrink the committed area.
+    size_t length = committed_size - commit_size;
+    Address start = address() + committed_size + guard_size - length;
+    if (reservation_.IsReserved()) {
+      if (!reservation_.Uncommit(start, length)) return false;
+    } else {
+      CodeRange* code_range = heap_->isolate()->code_range();
+      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
+      if (!code_range->UncommitRawMemory(start, length)) return false;
+    }
+  }
+
+  area_end_ = area_start_ + requested;
+  return true;
+}
+
+
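A minimal sketch of the control-flow tweak danno suggests above, written as a standalone hypothetical helper rather than the landed code: with the shrink path as an explicit "else if", the commit_size == committed_size case needs no dedicated early-return block, because it skips both branches and falls through to the common tail that updates area_end_. The name AdjustCommittedArea and the grow/shrink callbacks are illustrative stand-ins for the Commit()/CommitRawMemory() and Uncommit()/UncommitRawMemory() calls in CommitArea.

#include <cstddef>

// Hypothetical illustration of the suggested structure; not the committed patch.
// "grow" stands in for Commit()/CommitRawMemory(), "shrink" for
// Uncommit()/UncommitRawMemory().
bool AdjustCommittedArea(size_t commit_size, size_t committed_size,
                         bool (*grow)(size_t length),
                         bool (*shrink)(size_t length)) {
  if (commit_size > committed_size) {
    // Append pages to the committed area.
    if (!grow(commit_size - committed_size)) return false;
  } else if (commit_size < committed_size) {
    // Trim pages from the end of the committed area.
    if (!shrink(committed_size - commit_size)) return false;
  }
  // Equal sizes fall through: nothing to commit or uncommit at the OS level.
  return true;
}

In the real method, the shared tail (setting area_end_ and returning true) would follow this if/else-if, which is what makes the separate equality block removable.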
 void MemoryChunk::InsertAfter(MemoryChunk* other) {
   next_chunk_ = other->next_chunk_;
   prev_chunk_ = other;
   other->next_chunk_->prev_chunk_ = this;
   other->next_chunk_ = this;
 }


 void MemoryChunk::Unlink() {
   if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
     heap_->decrement_scan_on_scavenge_pages();
     ClearFlag(SCAN_ON_SCAVENGE);
   }
   next_chunk_->prev_chunk_ = prev_chunk_;
   prev_chunk_->next_chunk_ = next_chunk_;
   prev_chunk_ = NULL;
   next_chunk_ = NULL;
 }


-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
+MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
+                                            intptr_t commit_area_size,
                                             Executability executable,
                                             Space* owner) {
+  ASSERT(commit_area_size <= reserve_area_size);
+
   size_t chunk_size;
   Heap* heap = isolate_->heap();
   Address base = NULL;
   VirtualMemory reservation;
   Address area_start = NULL;
   Address area_end = NULL;

+  //
+  // MemoryChunk layout:
+  //
+  //             Executable
+  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
+  // |           Header           |
+  // +----------------------------+<- base + CodePageGuardStartOffset
+  // |           Guard            |
+  // +----------------------------+<- area_start_
+  // |            Area            |
+  // +----------------------------+<- area_end_ (area_start + commit_area_size)
+  // |   Committed but not used   |
+  // +----------------------------+<- aligned at OS page boundary
+  // | Reserved but not committed |
+  // +----------------------------+<- aligned at OS page boundary
+  // |           Guard            |
+  // +----------------------------+<- base + chunk_size
+  //
+  //           Non-executable
+  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
+  // |           Header           |
+  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
+  // |            Area            |
+  // +----------------------------+<- area_end_ (area_start + commit_area_size)
+  // |   Committed but not used   |
+  // +----------------------------+<- aligned at OS page boundary
+  // | Reserved but not committed |
+  // +----------------------------+<- base + chunk_size
+  //
+
   if (executable == EXECUTABLE) {
-    chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
+    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
                          OS::CommitPageSize()) + CodePageGuardSize();

     // Check executable memory limit.
     if (size_executable_ + chunk_size > capacity_executable_) {
       LOG(isolate_,
           StringEvent("MemoryAllocator::AllocateRawMemory",
                       "V8 Executable Allocation capacity exceeded"));
       return NULL;
     }

+    // Size of header (not executable) plus area (executable).
+    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
+                                 OS::CommitPageSize());
     // Allocate executable memory either from code range or from the
     // OS.
     if (isolate_->code_range()->exists()) {
-      base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
+      base = isolate_->code_range()->AllocateRawMemory(chunk_size,
+                                                       commit_size,
+                                                       &chunk_size);
       ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
                        MemoryChunk::kAlignment));
       if (base == NULL) return NULL;
       size_ += chunk_size;
       // Update executable memory size.
       size_executable_ += chunk_size;
     } else {
       base = AllocateAlignedMemory(chunk_size,
+                                   commit_size,
                                    MemoryChunk::kAlignment,
                                    executable,
                                    &reservation);
       if (base == NULL) return NULL;
       // Update executable memory size.
       size_executable_ += reservation.size();
     }

     if (Heap::ShouldZapGarbage()) {
       ZapBlock(base, CodePageGuardStartOffset());
-      ZapBlock(base + CodePageAreaStartOffset(), body_size);
+      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
     }

     area_start = base + CodePageAreaStartOffset();
-    area_end = area_start + body_size;
+    area_end = area_start + commit_area_size;
   } else {
-    chunk_size = MemoryChunk::kObjectStartOffset + body_size;
+    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
+                         OS::CommitPageSize());
+    size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
+                                 commit_area_size, OS::CommitPageSize());
     base = AllocateAlignedMemory(chunk_size,
+                                 commit_size,
                                  MemoryChunk::kAlignment,
                                  executable,
                                  &reservation);

     if (base == NULL) return NULL;

     if (Heap::ShouldZapGarbage()) {
-      ZapBlock(base, chunk_size);
+      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
     }

     area_start = base + Page::kObjectStartOffset;
-    area_end = base + chunk_size;
+    area_end = area_start + commit_area_size;
   }

+  // Use chunk_size for statistics and callbacks because we assume that they
+  // treat reserved but not-yet committed memory regions of chunks as allocated.
   isolate_->counters()->memory_allocated()->
       Increment(static_cast<int>(chunk_size));

   LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
   if (owner != NULL) {
     ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
     PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
   }

   MemoryChunk* result = MemoryChunk::Initialize(heap,
                                                 base,
                                                 chunk_size,
                                                 area_start,
                                                 area_end,
                                                 executable,
                                                 owner);
   result->set_reserved_memory(&reservation);
   return result;
 }


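The two call sites below pass the same value for both sizes, so ordinary pages and large pages keep their previous behaviour. The split only pays off for a caller such as the deoptimization entry code this issue is about, which can reserve its maximum area once and commit lazily, growing in place via MemoryChunk::CommitArea. A rough usage sketch; `allocator`, `owner`, and the concrete sizes are illustrative assumptions, not code from this patch:

// Hypothetical caller: reserve generously once, commit only what is needed.
// `allocator` stands in for the isolate's MemoryAllocator and `owner` for an
// owning Space.
const intptr_t kMaxAreaSize = 16 * Page::kPageSize;  // upper bound ever needed
const intptr_t kInitialCommit = Page::kPageSize;     // enough for now

MemoryChunk* chunk = allocator->AllocateChunk(kMaxAreaSize,
                                              kInitialCommit,
                                              EXECUTABLE,
                                              owner);
if (chunk != NULL) {
  // Later: grow the usable area in place, no new reservation required.
  bool grew = chunk->CommitArea(4 * Page::kPageSize);
  ASSERT(grew);
}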
 Page* MemoryAllocator::AllocatePage(intptr_t size,
                                     PagedSpace* owner,
                                     Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(size, executable, owner);
+  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);

   if (chunk == NULL) return NULL;

   return Page::Initialize(isolate_->heap(), chunk, executable, owner);
 }


 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                               Space* owner,
                                               Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
+  MemoryChunk* chunk = AllocateChunk(object_size,
+                                     object_size,
+                                     executable,
+                                     owner);
   if (chunk == NULL) return NULL;
   return LargePage::Initialize(isolate_->heap(), chunk);
 }


 void MemoryAllocator::Free(MemoryChunk* chunk) {
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
   if (chunk->owner() != NULL) {
     ObjectSpace space =
         static_cast<ObjectSpace>(1 << chunk->owner()->identity());
(...skipping 121 matching lines...)
 }


 int MemoryAllocator::CodePageAreaEndOffset() {
   // We are guarding code pages: the last OS page will be protected as
   // non-writable.
   return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
 }


-bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
-                                     Address start,
-                                     size_t size) {
+bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
+                                             Address start,
+                                             size_t commit_size,
+                                             size_t reserved_size) {
   // Commit page header (not executable).
   if (!vm->Commit(start,
                   CodePageGuardStartOffset(),
                   false)) {
     return false;
   }

   // Create guard page after the header.
   if (!vm->Guard(start + CodePageGuardStartOffset())) {
     return false;
   }

   // Commit page body (executable).
-  size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
   if (!vm->Commit(start + CodePageAreaStartOffset(),
-                  area_size,
+                  commit_size - CodePageGuardStartOffset(),
                   true)) {
     return false;
   }

-  // Create guard page after the allocatable area.
-  if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
+  // Create guard page before the end.
+  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
     return false;
   }

   return true;
 }


 // -----------------------------------------------------------------------------
 // MemoryChunk implementation

(...skipping 2215 matching lines...)
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }

 #endif  // DEBUG

 } }  // namespace v8::internal