Chromium Code Reviews

Unified Diff: src/spaces.cc

Issue 12049069: Revert r13494: "Use MemoryChunk-based allocation for deoptimization entry code" (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 11 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 188 matching lines...)
     if (requested <= allocation_list_[current_allocation_block_index_].size) {
       return;  // Found a large enough allocation block.
     }
   }

   // Code range is full or too fragmented.
   V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
 }


-Address CodeRange::AllocateRawMemory(const size_t requested_size,
-                                     const size_t commit_size,
+
+Address CodeRange::AllocateRawMemory(const size_t requested,
                                      size_t* allocated) {
-  ASSERT(commit_size <= requested_size);
   ASSERT(current_allocation_block_index_ < allocation_list_.length());
-  if (requested_size > allocation_list_[current_allocation_block_index_].size) {
+  if (requested > allocation_list_[current_allocation_block_index_].size) {
     // Find an allocation block large enough.  This function call may
     // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
-    GetNextAllocationBlock(requested_size);
+    GetNextAllocationBlock(requested);
   }
   // Commit the requested memory at the start of the current allocation block.
-  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
+  size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
   FreeBlock current = allocation_list_[current_allocation_block_index_];
   if (aligned_requested >= (current.size - Page::kPageSize)) {
     // Don't leave a small free block, useless for a large object or chunk.
     *allocated = current.size;
   } else {
     *allocated = aligned_requested;
   }
   ASSERT(*allocated <= current.size);
   ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!MemoryAllocator::CommitExecutableMemory(code_range_,
-                                               current.start,
-                                               commit_size,
-                                               *allocated)) {
+  if (!MemoryAllocator::CommitCodePage(code_range_,
+                                       current.start,
+                                       *allocated)) {
     *allocated = 0;
     return NULL;
   }
   allocation_list_[current_allocation_block_index_].start += *allocated;
   allocation_list_[current_allocation_block_index_].size -= *allocated;
   if (*allocated == current.size) {
     GetNextAllocationBlock(0);  // This block is used up, get the next one.
   }
   return current.start;
 }


-bool CodeRange::CommitRawMemory(Address start, size_t length) {
-  return code_range_->Commit(start, length, true);
-}
-
-
-bool CodeRange::UncommitRawMemory(Address start, size_t length) {
-  return code_range_->Uncommit(start, length);
-}
-
-
 void CodeRange::FreeRawMemory(Address address, size_t length) {
   ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
   free_list_.Add(FreeBlock(address, length));
   code_range_->Uncommit(address, length);
 }


 void CodeRange::TearDown() {
   delete code_range_;  // Frees all memory in the virtual memory range.
   code_range_ = NULL;
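To make the rounding logic in CodeRange::AllocateRawMemory above concrete, here is a standalone sketch of its block-splitting arithmetic. It is not V8 code; the constants (1 MB alignment and page size, a 5 MB free block) and the request size are assumptions chosen purely for illustration.

#include <cstddef>
#include <cstdio>

// Round 'value' up to the next multiple of 'alignment', as RoundUp does above.
static size_t RoundUp(size_t value, size_t alignment) {
  return ((value + alignment - 1) / alignment) * alignment;
}

int main() {
  const size_t kAlignment = 1 << 20;   // assumed MemoryChunk::kAlignment (1 MB)
  const size_t kPageSize  = 1 << 20;   // assumed Page::kPageSize (1 MB)
  const size_t block_size = 5 << 20;   // assumed size of the current free block

  const size_t requested = (3 << 20) + 12345;                        // arbitrary request
  const size_t aligned_requested = RoundUp(requested, kAlignment);   // rounds to 4 MB

  size_t allocated;
  if (aligned_requested >= block_size - kPageSize) {
    // "Don't leave a small free block": 4 MB >= 5 MB - 1 MB, so the whole
    // 5 MB block is handed out instead of leaving a 1 MB remainder.
    allocated = block_size;
  } else {
    allocated = aligned_requested;
  }
  printf("requested=%zu aligned=%zu allocated=%zu\n",
         requested, aligned_requested, allocated);
  return 0;
}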
(...skipping 84 matching lines...)
 }


 Address MemoryAllocator::ReserveAlignedMemory(size_t size,
                                               size_t alignment,
                                               VirtualMemory* controller) {
   VirtualMemory reservation(size, alignment);

   if (!reservation.IsReserved()) return NULL;
   size_ += reservation.size();
-  Address base = static_cast<Address>(reservation.address());
+  Address base = RoundUp(static_cast<Address>(reservation.address()),
+                         alignment);
   controller->TakeControl(&reservation);
   return base;
 }


-Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
-                                               size_t commit_size,
+Address MemoryAllocator::AllocateAlignedMemory(size_t size,
                                                size_t alignment,
                                                Executability executable,
                                                VirtualMemory* controller) {
-  ASSERT(commit_size <= reserve_size);
   VirtualMemory reservation;
-  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
+  Address base = ReserveAlignedMemory(size, alignment, &reservation);
   if (base == NULL) return NULL;

   if (executable == EXECUTABLE) {
-    if (!CommitExecutableMemory(&reservation,
-                                base,
-                                commit_size,
-                                reserve_size)) {
+    if (!CommitCodePage(&reservation, base, size)) {
       base = NULL;
     }
   } else {
-    if (!reservation.Commit(base, commit_size, false)) {
+    if (!reservation.Commit(base, size, false)) {
       base = NULL;
     }
   }

   if (base == NULL) {
     // Failed to commit the body. Release the mapping and any partially
     // commited regions inside it.
     reservation.Release();
     return NULL;
   }
(...skipping 83 matching lines...)
   }

   if (owner == heap->old_data_space()) {
     chunk->SetFlag(CONTAINS_ONLY_DATA);
   }

   return chunk;
 }


-// Commit MemoryChunk area to the requested size.
-bool MemoryChunk::CommitArea(size_t requested) {
-  size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
-      MemoryAllocator::CodePageGuardSize() : 0;
-  size_t header_size = area_start() - address() - guard_size;
-  size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
-  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
-                                  OS::CommitPageSize());
-
-  if (commit_size > committed_size) {
-    // Commit size should be less or equal than the reserved size.
-    ASSERT(commit_size <= size() - 2 * guard_size);
-    // Append the committed area.
-    Address start = address() + committed_size + guard_size;
-    size_t length = commit_size - committed_size;
-    if (reservation_.IsReserved()) {
-      if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) {
-        return false;
-      }
-    } else {
-      CodeRange* code_range = heap_->isolate()->code_range();
-      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
-      if (!code_range->CommitRawMemory(start, length)) return false;
-    }
-
-    if (Heap::ShouldZapGarbage()) {
-      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
-    }
-  } else if (commit_size < committed_size) {
-    ASSERT(commit_size > 0);
-    // Shrink the committed area.
-    size_t length = committed_size - commit_size;
-    Address start = address() + committed_size + guard_size - length;
-    if (reservation_.IsReserved()) {
-      if (!reservation_.Uncommit(start, length)) return false;
-    } else {
-      CodeRange* code_range = heap_->isolate()->code_range();
-      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
-      if (!code_range->UncommitRawMemory(start, length)) return false;
-    }
-  }
-
-  area_end_ = area_start_ + requested;
-  return true;
-}
-
-
 void MemoryChunk::InsertAfter(MemoryChunk* other) {
   next_chunk_ = other->next_chunk_;
   prev_chunk_ = other;
   other->next_chunk_->prev_chunk_ = this;
   other->next_chunk_ = this;
 }


 void MemoryChunk::Unlink() {
   if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
     heap_->decrement_scan_on_scavenge_pages();
     ClearFlag(SCAN_ON_SCAVENGE);
   }
   next_chunk_->prev_chunk_ = prev_chunk_;
   prev_chunk_->next_chunk_ = next_chunk_;
   prev_chunk_ = NULL;
   next_chunk_ = NULL;
 }


-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
-                                            intptr_t commit_area_size,
+MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
                                             Executability executable,
                                             Space* owner) {
-  ASSERT(commit_area_size <= reserve_area_size);
-
   size_t chunk_size;
   Heap* heap = isolate_->heap();
   Address base = NULL;
   VirtualMemory reservation;
   Address area_start = NULL;
   Address area_end = NULL;

-  //
-  // MemoryChunk layout:
-  //
-  //             Executable
-  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
-  // |           Header           |
-  // +----------------------------+<- base + CodePageGuardStartOffset
-  // |           Guard            |
-  // +----------------------------+<- area_start_
-  // |            Area            |
-  // +----------------------------+<- area_end_ (area_start + commit_area_size)
-  // |   Committed but not used   |
-  // +----------------------------+<- aligned at OS page boundary
-  // | Reserved but not committed |
-  // +----------------------------+<- aligned at OS page boundary
-  // |           Guard            |
-  // +----------------------------+<- base + chunk_size
-  //
-  //           Non-executable
-  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
-  // |           Header           |
-  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
-  // |            Area            |
-  // +----------------------------+<- area_end_ (area_start + commit_area_size)
-  // |   Committed but not used   |
-  // +----------------------------+<- aligned at OS page boundary
-  // | Reserved but not committed |
-  // +----------------------------+<- base + chunk_size
-  //
-
   if (executable == EXECUTABLE) {
-    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
+    chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
                          OS::CommitPageSize()) + CodePageGuardSize();

     // Check executable memory limit.
     if (size_executable_ + chunk_size > capacity_executable_) {
       LOG(isolate_,
           StringEvent("MemoryAllocator::AllocateRawMemory",
                       "V8 Executable Allocation capacity exceeded"));
       return NULL;
     }

-    // Size of header (not executable) plus area (executable).
-    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
-                                 OS::CommitPageSize());
     // Allocate executable memory either from code range or from the
     // OS.
     if (isolate_->code_range()->exists()) {
-      base = isolate_->code_range()->AllocateRawMemory(chunk_size,
-                                                       commit_size,
-                                                       &chunk_size);
+      base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
       ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
                        MemoryChunk::kAlignment));
       if (base == NULL) return NULL;
       size_ += chunk_size;
       // Update executable memory size.
       size_executable_ += chunk_size;
     } else {
       base = AllocateAlignedMemory(chunk_size,
-                                   commit_size,
                                    MemoryChunk::kAlignment,
                                    executable,
                                    &reservation);
       if (base == NULL) return NULL;
       // Update executable memory size.
       size_executable_ += reservation.size();
     }

     if (Heap::ShouldZapGarbage()) {
       ZapBlock(base, CodePageGuardStartOffset());
-      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
+      ZapBlock(base + CodePageAreaStartOffset(), body_size);
     }

     area_start = base + CodePageAreaStartOffset();
-    area_end = area_start + commit_area_size;
+    area_end = area_start + body_size;
   } else {
-    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
-                         OS::CommitPageSize());
-    size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
-                                 commit_area_size, OS::CommitPageSize());
+    chunk_size = MemoryChunk::kObjectStartOffset + body_size;
     base = AllocateAlignedMemory(chunk_size,
-                                 commit_size,
                                  MemoryChunk::kAlignment,
                                  executable,
                                  &reservation);

     if (base == NULL) return NULL;

     if (Heap::ShouldZapGarbage()) {
-      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
+      ZapBlock(base, chunk_size);
     }

     area_start = base + Page::kObjectStartOffset;
-    area_end = area_start + commit_area_size;
+    area_end = base + chunk_size;
   }

-  // Use chunk_size for statistics and callbacks because we assume that they
-  // treat reserved but not-yet committed memory regions of chunks as allocated.
   isolate_->counters()->memory_allocated()->
       Increment(static_cast<int>(chunk_size));

   LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
   if (owner != NULL) {
     ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
     PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
   }

   MemoryChunk* result = MemoryChunk::Initialize(heap,
                                                 base,
                                                 chunk_size,
                                                 area_start,
                                                 area_end,
                                                 executable,
                                                 owner);
   result->set_reserved_memory(&reservation);
   return result;
 }


 Page* MemoryAllocator::AllocatePage(intptr_t size,
                                     PagedSpace* owner,
                                     Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
+  MemoryChunk* chunk = AllocateChunk(size, executable, owner);

   if (chunk == NULL) return NULL;

   return Page::Initialize(isolate_->heap(), chunk, executable, owner);
 }


 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                               Space* owner,
                                               Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(object_size,
-                                     object_size,
-                                     executable,
-                                     owner);
+  MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
   if (chunk == NULL) return NULL;
   return LargePage::Initialize(isolate_->heap(), chunk);
 }


 void MemoryAllocator::Free(MemoryChunk* chunk) {
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
   if (chunk->owner() != NULL) {
     ObjectSpace space =
         static_cast<ObjectSpace>(1 << chunk->owner()->identity());
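To illustrate the non-executable chunk sizing change in MemoryAllocator::AllocateChunk above, here is a standalone sketch of the two computations. It is not V8 code; the header size (256 bytes), OS commit page (4 KB) and requested body size are assumptions chosen for illustration.

#include <cstddef>
#include <cstdio>

static size_t RoundUp(size_t value, size_t alignment) {
  return ((value + alignment - 1) / alignment) * alignment;
}

int main() {
  const size_t kObjectStartOffset = 256;    // assumed MemoryChunk::kObjectStartOffset
  const size_t kCommitPageSize = 4 * 1024;  // assumed OS::CommitPageSize()
  const size_t body_size = 100000;          // assumed requested area size

  // Before the revert: the reservation is rounded up to whole OS commit pages
  // (a separate, possibly smaller commit_size was computed the same way).
  const size_t chunk_size_before =
      RoundUp(kObjectStartOffset + body_size, kCommitPageSize);

  // After the revert: header plus body, with no rounding to commit pages here.
  const size_t chunk_size_after = kObjectStartOffset + body_size;

  printf("before=%zu after=%zu\n", chunk_size_before, chunk_size_after);
  return 0;
}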
(...skipping 121 matching lines...)
 }


 int MemoryAllocator::CodePageAreaEndOffset() {
   // We are guarding code pages: the last OS page will be protected as
   // non-writable.
   return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
 }


-bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
-                                             Address start,
-                                             size_t commit_size,
-                                             size_t reserved_size) {
+bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
+                                     Address start,
+                                     size_t size) {
   // Commit page header (not executable).
   if (!vm->Commit(start,
                   CodePageGuardStartOffset(),
                   false)) {
     return false;
   }

   // Create guard page after the header.
   if (!vm->Guard(start + CodePageGuardStartOffset())) {
     return false;
   }

   // Commit page body (executable).
+  size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
   if (!vm->Commit(start + CodePageAreaStartOffset(),
-                  commit_size - CodePageGuardStartOffset(),
+                  area_size,
                   true)) {
     return false;
   }

-  // Create guard page before the end.
-  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
+  // Create guard page after the allocatable area.
+  if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
     return false;
   }

   return true;
 }


 // -----------------------------------------------------------------------------
 // MemoryChunk implementation

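As a companion to the guard-page logic above, here is a standalone sketch of the code-page layout that CommitCodePage establishes: non-executable header, guard page, executable area, trailing guard page. It is not V8 code; the 4 KB commit page and 1 MB code page are assumed values, and the local names only mirror the accessors used in this diff.

#include <cassert>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kCommitPageSize = 4 * 1024;   // assumed OS::CommitPageSize()
  const size_t kPageSize = 1024 * 1024;      // assumed Page::kPageSize

  // Header rounded up to one commit page, followed by one guard page before
  // the area (cf. CodePageGuardStartOffset() / CodePageAreaStartOffset()).
  const size_t guard_start_offset = kCommitPageSize;
  const size_t guard_size = kCommitPageSize;           // CodePageGuardSize()
  const size_t area_start_offset = guard_start_offset + guard_size;

  // With this revert the whole page is committed at once, so the executable
  // area runs from the area start up to the trailing guard page.
  const size_t size = kPageSize;
  const size_t area_size = size - area_start_offset - guard_size;

  // The trailing guard is the last OS page of the code page, matching
  // CodePageAreaEndOffset() = Page::kPageSize - OS::CommitPageSize() above.
  assert(area_start_offset + area_size == kPageSize - kCommitPageSize);

  printf("area starts at %zu, ends at %zu, page size %zu\n",
         area_start_offset, area_start_offset + area_size, kPageSize);
  return 0;
}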
(...skipping 2215 matching lines...)
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }

 #endif  // DEBUG

 } }  // namespace v8::internal