OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/spaces.h" | 5 #include "src/heap/spaces.h" |
6 | 6 |
7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
9 #include "src/full-codegen/full-codegen.h" | 9 #include "src/full-codegen/full-codegen.h" |
10 #include "src/heap/mark-compact.h" | 10 #include "src/heap/mark-compact.h" |
(...skipping 307 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
318 | 318 |
319 size_ = 0; | 319 size_ = 0; |
320 size_executable_ = 0; | 320 size_executable_ = 0; |
321 | 321 |
322 return true; | 322 return true; |
323 } | 323 } |
324 | 324 |
325 | 325 |
// Resets the allocator's bookkeeping after all spaces have been destroyed.
// Precondition: total allocated size must already be zero (spaces release
// their pages before the MemoryAllocator is torn down).
void MemoryAllocator::TearDown() {
  // Check that spaces were torn down before MemoryAllocator.
  DCHECK(size_.Value() == 0);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // DCHECK(size_executable_ == 0);
  capacity_ = 0;
  capacity_executable_ = 0;
}
334 | 334 |
335 | 335 |
336 bool MemoryAllocator::CommitMemory(Address base, size_t size, | 336 bool MemoryAllocator::CommitMemory(Address base, size_t size, |
337 Executability executable) { | 337 Executability executable) { |
338 if (!base::VirtualMemory::CommitRegion(base, size, | 338 if (!base::VirtualMemory::CommitRegion(base, size, |
339 executable == EXECUTABLE)) { | 339 executable == EXECUTABLE)) { |
340 return false; | 340 return false; |
341 } | 341 } |
342 UpdateAllocatedSpaceLimits(base, base + size); | 342 UpdateAllocatedSpaceLimits(base, base + size); |
343 return true; | 343 return true; |
344 } | 344 } |
345 | 345 |
346 | 346 |
// Releases the virtual memory reservation backing new space and updates the
// allocator's size accounting and the isolate's memory counters.
// NOTE(review): the |executable| parameter is unused — the region is always
// freed as NOT_EXECUTABLE; confirm this is intentional.
void MemoryAllocator::FreeNewSpaceMemory(Address addr,
                                         base::VirtualMemory* reservation,
                                         Executability executable) {
  LOG(isolate_, DeleteEvent("NewSpace", addr));

  DCHECK(reservation->IsReserved());
  // Account for the released bytes before handing the region back to the OS.
  const intptr_t size = static_cast<intptr_t>(reservation->size());
  DCHECK(size_.Value() >= size);
  size_.Increment(-size);
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  FreeMemory(reservation, NOT_EXECUTABLE);
}
359 | 359 |
360 | 360 |
361 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation, | 361 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation, |
362 Executability executable) { | 362 Executability executable) { |
363 // TODO(gc) make code_range part of memory allocator? | 363 // TODO(gc) make code_range part of memory allocator? |
364 // Code which is part of the code-range does not have its own VirtualMemory. | 364 // Code which is part of the code-range does not have its own VirtualMemory. |
365 DCHECK(isolate_->code_range() == NULL || | 365 DCHECK(isolate_->code_range() == NULL || |
(...skipping 22 matching lines...) Expand all Loading... |
388 DCHECK(result); | 388 DCHECK(result); |
389 } | 389 } |
390 } | 390 } |
391 | 391 |
392 | 392 |
393 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment, | 393 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment, |
394 base::VirtualMemory* controller) { | 394 base::VirtualMemory* controller) { |
395 base::VirtualMemory reservation(size, alignment); | 395 base::VirtualMemory reservation(size, alignment); |
396 | 396 |
397 if (!reservation.IsReserved()) return NULL; | 397 if (!reservation.IsReserved()) return NULL; |
398 size_ += reservation.size(); | 398 size_.Increment(static_cast<intptr_t>(reservation.size())); |
399 Address base = | 399 Address base = |
400 RoundUp(static_cast<Address>(reservation.address()), alignment); | 400 RoundUp(static_cast<Address>(reservation.address()), alignment); |
401 controller->TakeControl(&reservation); | 401 controller->TakeControl(&reservation); |
402 return base; | 402 return base; |
403 } | 403 } |
404 | 404 |
405 | 405 |
406 Address MemoryAllocator::AllocateAlignedMemory( | 406 Address MemoryAllocator::AllocateAlignedMemory( |
407 size_t reserve_size, size_t commit_size, size_t alignment, | 407 size_t reserve_size, size_t commit_size, size_t alignment, |
408 Executability executable, base::VirtualMemory* controller) { | 408 Executability executable, base::VirtualMemory* controller) { |
(...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
486 chunk->size_ = size; | 486 chunk->size_ = size; |
487 chunk->area_start_ = area_start; | 487 chunk->area_start_ = area_start; |
488 chunk->area_end_ = area_end; | 488 chunk->area_end_ = area_end; |
489 chunk->flags_ = 0; | 489 chunk->flags_ = 0; |
490 chunk->set_owner(owner); | 490 chunk->set_owner(owner); |
491 chunk->InitializeReservedMemory(); | 491 chunk->InitializeReservedMemory(); |
492 chunk->slots_buffer_ = NULL; | 492 chunk->slots_buffer_ = NULL; |
493 chunk->skip_list_ = NULL; | 493 chunk->skip_list_ = NULL; |
494 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; | 494 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; |
495 chunk->progress_bar_ = 0; | 495 chunk->progress_bar_ = 0; |
496 chunk->high_water_mark_ = static_cast<int>(area_start - base); | 496 chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base)); |
497 chunk->set_parallel_sweeping(SWEEPING_DONE); | 497 chunk->set_parallel_sweeping(SWEEPING_DONE); |
498 chunk->mutex_ = NULL; | 498 chunk->mutex_ = NULL; |
499 chunk->available_in_small_free_list_ = 0; | 499 chunk->available_in_small_free_list_ = 0; |
500 chunk->available_in_medium_free_list_ = 0; | 500 chunk->available_in_medium_free_list_ = 0; |
501 chunk->available_in_large_free_list_ = 0; | 501 chunk->available_in_large_free_list_ = 0; |
502 chunk->available_in_huge_free_list_ = 0; | 502 chunk->available_in_huge_free_list_ = 0; |
503 chunk->non_available_small_blocks_ = 0; | 503 chunk->non_available_small_blocks_ = 0; |
504 chunk->ResetLiveBytes(); | 504 chunk->ResetLiveBytes(); |
505 Bitmap::Clear(chunk); | 505 Bitmap::Clear(chunk); |
506 chunk->initialize_scan_on_scavenge(false); | 506 chunk->initialize_scan_on_scavenge(false); |
(...skipping 125 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
632 // | Reserved but not committed | | 632 // | Reserved but not committed | |
633 // +----------------------------+<- base + chunk_size | 633 // +----------------------------+<- base + chunk_size |
634 // | 634 // |
635 | 635 |
636 if (executable == EXECUTABLE) { | 636 if (executable == EXECUTABLE) { |
637 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size, | 637 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size, |
638 base::OS::CommitPageSize()) + | 638 base::OS::CommitPageSize()) + |
639 CodePageGuardSize(); | 639 CodePageGuardSize(); |
640 | 640 |
641 // Check executable memory limit. | 641 // Check executable memory limit. |
642 if ((size_executable_ + chunk_size) > capacity_executable_) { | 642 if ((size_executable_.Value() + static_cast<intptr_t>(chunk_size)) > |
| 643 capacity_executable_) { |
643 LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory", | 644 LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory", |
644 "V8 Executable Allocation capacity exceeded")); | 645 "V8 Executable Allocation capacity exceeded")); |
645 return NULL; | 646 return NULL; |
646 } | 647 } |
647 | 648 |
648 // Size of header (not executable) plus area (executable). | 649 // Size of header (not executable) plus area (executable). |
649 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size, | 650 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size, |
650 base::OS::CommitPageSize()); | 651 base::OS::CommitPageSize()); |
651 // Allocate executable memory either from code range or from the | 652 // Allocate executable memory either from code range or from the |
652 // OS. | 653 // OS. |
653 #ifdef V8_TARGET_ARCH_MIPS64 | 654 #ifdef V8_TARGET_ARCH_MIPS64 |
654 // Use code range only for large object space on mips64 to keep address | 655 // Use code range only for large object space on mips64 to keep address |
655 // range within 256-MB memory region. | 656 // range within 256-MB memory region. |
656 if (isolate_->code_range() != NULL && isolate_->code_range()->valid() && | 657 if (isolate_->code_range() != NULL && isolate_->code_range()->valid() && |
657 reserve_area_size > CodePageAreaSize()) { | 658 reserve_area_size > CodePageAreaSize()) { |
658 #else | 659 #else |
659 if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) { | 660 if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) { |
660 #endif | 661 #endif |
661 base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size, | 662 base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size, |
662 &chunk_size); | 663 &chunk_size); |
663 DCHECK( | 664 DCHECK( |
664 IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment)); | 665 IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment)); |
665 if (base == NULL) return NULL; | 666 if (base == NULL) return NULL; |
666 size_ += chunk_size; | 667 size_.Increment(static_cast<intptr_t>(chunk_size)); |
667 // Update executable memory size. | 668 // Update executable memory size. |
668 size_executable_ += chunk_size; | 669 size_executable_.Increment(static_cast<intptr_t>(chunk_size)); |
669 } else { | 670 } else { |
670 base = AllocateAlignedMemory(chunk_size, commit_size, | 671 base = AllocateAlignedMemory(chunk_size, commit_size, |
671 MemoryChunk::kAlignment, executable, | 672 MemoryChunk::kAlignment, executable, |
672 &reservation); | 673 &reservation); |
673 if (base == NULL) return NULL; | 674 if (base == NULL) return NULL; |
674 // Update executable memory size. | 675 // Update executable memory size. |
675 size_executable_ += reservation.size(); | 676 size_executable_.Increment(static_cast<intptr_t>(chunk_size)); |
676 } | 677 } |
677 | 678 |
678 if (Heap::ShouldZapGarbage()) { | 679 if (Heap::ShouldZapGarbage()) { |
679 ZapBlock(base, CodePageGuardStartOffset()); | 680 ZapBlock(base, CodePageGuardStartOffset()); |
680 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size); | 681 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size); |
681 } | 682 } |
682 | 683 |
683 area_start = base + CodePageAreaStartOffset(); | 684 area_start = base + CodePageAreaStartOffset(); |
684 area_end = area_start + commit_area_size; | 685 area_end = area_start + commit_area_size; |
685 } else { | 686 } else { |
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
752 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); | 753 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); |
753 if (chunk->owner() != NULL) { | 754 if (chunk->owner() != NULL) { |
754 ObjectSpace space = | 755 ObjectSpace space = |
755 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); | 756 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); |
756 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); | 757 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); |
757 } | 758 } |
758 | 759 |
759 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk), | 760 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk), |
760 chunk->IsEvacuationCandidate()); | 761 chunk->IsEvacuationCandidate()); |
761 | 762 |
762 size_t size; | 763 intptr_t size; |
763 base::VirtualMemory* reservation = chunk->reserved_memory(); | 764 base::VirtualMemory* reservation = chunk->reserved_memory(); |
764 if (reservation->IsReserved()) { | 765 if (reservation->IsReserved()) { |
765 size = reservation->size(); | 766 size = static_cast<intptr_t>(reservation->size()); |
766 } else { | 767 } else { |
767 size = chunk->size(); | 768 size = static_cast<intptr_t>(chunk->size()); |
768 } | 769 } |
769 DCHECK(size_ >= size); | 770 DCHECK(size_.Value() >= size); |
770 size_ -= size; | 771 size_.Increment(-size); |
771 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | 772 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); |
772 | 773 |
773 if (chunk->executable() == EXECUTABLE) { | 774 if (chunk->executable() == EXECUTABLE) { |
774 DCHECK(size_executable_ >= size); | 775 DCHECK(size_executable_.Value() >= size); |
775 size_executable_ -= size; | 776 size_executable_.Increment(-size); |
776 } | 777 } |
777 | 778 |
778 chunk->SetFlag(MemoryChunk::PRE_FREED); | 779 chunk->SetFlag(MemoryChunk::PRE_FREED); |
779 } | 780 } |
780 | 781 |
781 | 782 |
782 void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) { | 783 void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) { |
783 DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED)); | 784 DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED)); |
784 chunk->ReleaseAllocatedMemory(); | 785 chunk->ReleaseAllocatedMemory(); |
785 | 786 |
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
865 memory_allocation_callbacks_.Remove(i); | 866 memory_allocation_callbacks_.Remove(i); |
866 return; | 867 return; |
867 } | 868 } |
868 } | 869 } |
869 UNREACHABLE(); | 870 UNREACHABLE(); |
870 } | 871 } |
871 | 872 |
872 | 873 |
#ifdef DEBUG
// Debug-only dump of the allocator's capacity, current size, and the
// percentage of capacity still available.
void MemoryAllocator::ReportStatistics() {
  // Snapshot the atomic size once so the printed values are consistent.
  intptr_t size = Size();
  float pct = static_cast<float>(capacity_ - size) / capacity_;
  PrintF(" capacity: %" V8_PTR_PREFIX
         "d"
         ", used: %" V8_PTR_PREFIX
         "d"
         ", available: %%%d\n\n",
         capacity_, size, static_cast<int>(pct * 100));
}
#endif
884 | 886 |
885 | 887 |
// Returns the offset of the first guard page in a code page: the page header
// rounded up to the OS commit-page granularity.
int MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
}
891 | 893 |
(...skipping 2293 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3185 object->ShortPrint(); | 3187 object->ShortPrint(); |
3186 PrintF("\n"); | 3188 PrintF("\n"); |
3187 } | 3189 } |
3188 printf(" --------------------------------------\n"); | 3190 printf(" --------------------------------------\n"); |
3189 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3191 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
3190 } | 3192 } |
3191 | 3193 |
3192 #endif // DEBUG | 3194 #endif // DEBUG |
3193 } // namespace internal | 3195 } // namespace internal |
3194 } // namespace v8 | 3196 } // namespace v8 |
OLD | NEW |