Chromium Code Reviews

Unified Diff: src/spaces.cc

Issue 358363002: Move platform abstraction to base library (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: updates | Created 6 years, 5 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/v8.h"

+#include "src/base/platform/platform.h"
 #include "src/full-codegen.h"
 #include "src/macro-assembler.h"
 #include "src/mark-compact.h"
 #include "src/msan.h"
-#include "src/platform.h"

 namespace v8 {
 namespace internal {


 // ----------------------------------------------------------------------------
 // HeapObjectIterator

 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
   // You can't actually iterate over the anchor page. It is not a real page,
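The include swap above is the whole CL in miniature: the platform layer moves from src/platform.h to src/base/platform/platform.h, and its types move from plain v8::internal scope into the v8::base namespace, so every call site in this file gains a base:: qualifier. A minimal sketch of that call-site effect, with a placeholder class standing in for the real header:

    // Hypothetical sketch of the renaming pattern; not the actual V8 headers.
    namespace v8 {
    namespace base {
    class VirtualMemory { /* reservation/commit API now lives here */ };
    }  // namespace base

    namespace internal {
    void Example() {
      // was:  VirtualMemory reservation;
      base::VirtualMemory reservation;  // v8::base is found from v8::internal
      (void)reservation;                // silence unused-variable warnings
    }
    }  // namespace internal
    }  // namespace v8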
(...skipping 97 matching lines...)
     // in a kMaximalCodeRangeSize range of virtual address space, so that
     // they can call each other with near calls.
     if (kRequiresCodeRange) {
       requested = kMaximalCodeRangeSize;
     } else {
       return true;
     }
   }

   ASSERT(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
-  code_range_ = new VirtualMemory(requested);
+  code_range_ = new base::VirtualMemory(requested);
   CHECK(code_range_ != NULL);
   if (!code_range_->IsReserved()) {
     delete code_range_;
     code_range_ = NULL;
     return false;
   }

   // We are sure that we have mapped a block of requested addresses.
   ASSERT(code_range_->size() == requested);
   LOG(isolate_,
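The comment at the top of this chunk is the rationale for CodeRange: on targets that require it, all generated code is placed inside one kMaximalCodeRangeSize reservation so code objects can reach each other with near (pc-relative) calls. A rough Linux-only sketch of such an up-front address-space reservation; the size constant and flags are assumptions for the demo, not V8's values:

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t kCodeRangeSize = 256u * 1024 * 1024;  // assumed size
      // Reserve contiguous address space without committing physical pages.
      void* range = mmap(nullptr, kCodeRangeSize, PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (range == MAP_FAILED) {
        std::perror("mmap");
        return 1;  // analogous to the delete/NULL/return false path above
      }
      // Any two code objects inside [range, range + kCodeRangeSize) are
      // within a 32-bit displacement of each other.
      munmap(range, kCodeRangeSize);
      return 0;
    }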
(...skipping 154 matching lines...)
   // TODO(gc) this will be true again when we fix FreeMemory.
   // ASSERT(size_executable_ == 0);
   capacity_ = 0;
   capacity_executable_ = 0;
 }


 bool MemoryAllocator::CommitMemory(Address base,
                                    size_t size,
                                    Executability executable) {
-  if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) {
+  if (!base::VirtualMemory::CommitRegion(base, size,
+                                         executable == EXECUTABLE)) {
     return false;
   }
   UpdateAllocatedSpaceLimits(base, base + size);
   return true;
 }


-void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
+void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
   ASSERT(reservation->IsReserved());
   size_t size = reservation->size();
   ASSERT(size_ >= size);
   size_ -= size;

   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

   if (executable == EXECUTABLE) {
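CommitRegion is the platform hook that turns reserved-but-inaccessible address space into memory the VM may touch. What it maps to is platform-specific; on POSIX it is plausibly an mprotect on an already-reserved, page-aligned range (an assumption about the implementation, not a quote of base::VirtualMemory):

    #include <sys/mman.h>
    #include <cstddef>

    // Assumed POSIX analogue of CommitRegion(base, size, is_executable).
    bool CommitRegionSketch(void* base, size_t size, bool is_executable) {
      int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
      return mprotect(base, size, prot) == 0;  // 0 means success
    }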
(...skipping 25 matching lines...)
     size_executable_ -= size;
   }
   if (isolate_->code_range() != NULL &&
       isolate_->code_range()->contains(static_cast<Address>(base))) {
     ASSERT(executable == EXECUTABLE);
     isolate_->code_range()->FreeRawMemory(base, size);
   } else {
     ASSERT(executable == NOT_EXECUTABLE ||
            isolate_->code_range() == NULL ||
            !isolate_->code_range()->valid());
-    bool result = VirtualMemory::ReleaseRegion(base, size);
+    bool result = base::VirtualMemory::ReleaseRegion(base, size);
     USE(result);
     ASSERT(result);
   }
 }


 Address MemoryAllocator::ReserveAlignedMemory(size_t size,
                                               size_t alignment,
-                                              VirtualMemory* controller) {
-  VirtualMemory reservation(size, alignment);
+                                              base::VirtualMemory* controller) {
+  base::VirtualMemory reservation(size, alignment);

   if (!reservation.IsReserved()) return NULL;
   size_ += reservation.size();
   Address base = RoundUp(static_cast<Address>(reservation.address()),
                          alignment);
   controller->TakeControl(&reservation);
   return base;
 }


-Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
-                                               size_t commit_size,
-                                               size_t alignment,
-                                               Executability executable,
-                                               VirtualMemory* controller) {
+Address MemoryAllocator::AllocateAlignedMemory(
+    size_t reserve_size, size_t commit_size, size_t alignment,
+    Executability executable, base::VirtualMemory* controller) {
   ASSERT(commit_size <= reserve_size);
-  VirtualMemory reservation;
+  base::VirtualMemory reservation;
   Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
   if (base == NULL) return NULL;

   if (executable == EXECUTABLE) {
     if (!CommitExecutableMemory(&reservation,
                                 base,
                                 commit_size,
                                 reserve_size)) {
       base = NULL;
     }
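ReserveAlignedMemory above asks the platform layer to reserve size bytes at the given alignment, then rounds the returned address up to that alignment before handing ownership to the caller via TakeControl. The RoundUp step is the standard power-of-two trick; a self-contained check of that arithmetic (the 1 MiB alignment is just a demo value):

    #include <cassert>
    #include <cstdint>

    // Round value up to a power-of-two alignment.
    uintptr_t RoundUpSketch(uintptr_t value, uintptr_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      assert(RoundUpSketch(0x12345678, 0x100000) == 0x12400000);
      assert(RoundUpSketch(0x12400000, 0x100000) == 0x12400000);  // no-op
      return 0;
    }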
(...skipping 108 matching lines...)

   return chunk;
 }


 // Commit MemoryChunk area to the requested size.
 bool MemoryChunk::CommitArea(size_t requested) {
   size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
       MemoryAllocator::CodePageGuardSize() : 0;
   size_t header_size = area_start() - address() - guard_size;
-  size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
+  size_t commit_size =
+      RoundUp(header_size + requested, base::OS::CommitPageSize());
   size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
-                                  OS::CommitPageSize());
+                                  base::OS::CommitPageSize());

   if (commit_size > committed_size) {
     // Commit size should be less or equal than the reserved size.
     ASSERT(commit_size <= size() - 2 * guard_size);
     // Append the committed area.
     Address start = address() + committed_size + guard_size;
     size_t length = commit_size - committed_size;
     if (reservation_.IsReserved()) {
       Executability executable = IsFlagSet(IS_EXECUTABLE)
           ? EXECUTABLE : NOT_EXECUTABLE;
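CommitArea works in page-rounded sizes: it compares what is already committed (committed_size) against what the request needs (commit_size) and commits or uncommits only the difference. A worked example with assumed numbers, 4 KiB commit pages and a 1 KiB header:

    #include <cstddef>
    #include <cstdio>

    size_t RoundUpTo(size_t v, size_t multiple) {
      return (v + multiple - 1) / multiple * multiple;
    }

    int main() {
      const size_t kCommitPageSize = 4096;  // assumed OS commit page size
      const size_t header_size = 1024;      // assumed header size
      const size_t old_area = 10 * 1024;    // currently committed area
      const size_t requested = 20 * 1024;   // requested area size
      size_t committed_size = RoundUpTo(header_size + old_area, kCommitPageSize);
      size_t commit_size = RoundUpTo(header_size + requested, kCommitPageSize);
      // Only the tail [committed_size, commit_size) needs committing:
      std::printf("grow by %zu bytes\n", commit_size - committed_size);  // 12288
      return 0;
    }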
(...skipping 53 matching lines...)

 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                             intptr_t commit_area_size,
                                             Executability executable,
                                             Space* owner) {
   ASSERT(commit_area_size <= reserve_area_size);

   size_t chunk_size;
   Heap* heap = isolate_->heap();
   Address base = NULL;
-  VirtualMemory reservation;
+  base::VirtualMemory reservation;
   Address area_start = NULL;
   Address area_end = NULL;

   //
   // MemoryChunk layout:
   //
   // Executable
   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
   // |           Header           |
   // +----------------------------+<- base + CodePageGuardStartOffset
(...skipping 15 matching lines...)
   // |            Area            |
   // +----------------------------+<- area_end_ (area_start + commit_area_size)
   // |   Committed but not used   |
   // +----------------------------+<- aligned at OS page boundary
   // | Reserved but not committed |
   // +----------------------------+<- base + chunk_size
   //

   if (executable == EXECUTABLE) {
     chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
-                         OS::CommitPageSize()) + CodePageGuardSize();
+                         base::OS::CommitPageSize()) + CodePageGuardSize();

     // Check executable memory limit.
     if (size_executable_ + chunk_size > capacity_executable_) {
       LOG(isolate_,
           StringEvent("MemoryAllocator::AllocateRawMemory",
                       "V8 Executable Allocation capacity exceeded"));
       return NULL;
     }

     // Size of header (not executable) plus area (executable).
     size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
-                                 OS::CommitPageSize());
+                                 base::OS::CommitPageSize());
     // Allocate executable memory either from code range or from the
     // OS.
     if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
       base = isolate_->code_range()->AllocateRawMemory(chunk_size,
                                                        commit_size,
                                                        &chunk_size);
       ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
                        MemoryChunk::kAlignment));
       if (base == NULL) return NULL;
       size_ += chunk_size;
(...skipping 12 matching lines...)

     if (Heap::ShouldZapGarbage()) {
       ZapBlock(base, CodePageGuardStartOffset());
       ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
     }

     area_start = base + CodePageAreaStartOffset();
     area_end = area_start + commit_area_size;
   } else {
     chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
-                         OS::CommitPageSize());
+                         base::OS::CommitPageSize());
     size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
-                                 commit_area_size, OS::CommitPageSize());
+                                 commit_area_size, base::OS::CommitPageSize());
     base = AllocateAlignedMemory(chunk_size,
                                  commit_size,
                                  MemoryChunk::kAlignment,
                                  executable,
                                  &reservation);

     if (base == NULL) return NULL;

     if (Heap::ShouldZapGarbage()) {
       ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
(...skipping 66 matching lines...)
         static_cast<ObjectSpace>(1 << chunk->owner()->identity());
     PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
   }

   isolate_->heap()->RememberUnmappedPage(
       reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());

   delete chunk->slots_buffer();
   delete chunk->skip_list();

-  VirtualMemory* reservation = chunk->reserved_memory();
+  base::VirtualMemory* reservation = chunk->reserved_memory();
   if (reservation->IsReserved()) {
     FreeMemory(reservation, chunk->executable());
   } else {
     FreeMemory(chunk->address(),
                chunk->size(),
                chunk->executable());
   }
 }


 bool MemoryAllocator::CommitBlock(Address start,
                                   size_t size,
                                   Executability executable) {
   if (!CommitMemory(start, size, executable)) return false;

   if (Heap::ShouldZapGarbage()) {
     ZapBlock(start, size);
   }

   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
   return true;
 }


 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
-  if (!VirtualMemory::UncommitRegion(start, size)) return false;
+  if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
   return true;
 }


 void MemoryAllocator::ZapBlock(Address start, size_t size) {
   for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
     Memory::Address_at(start + s) = kZapValue;
   }
 }
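ZapBlock overwrites a block with kZapValue at pointer granularity, so a stale pointer into freed or uninitialized heap shows up as a recognizable pattern in crash dumps. The same idiom in isolation (the marker value is assumed for the demo; V8's real constant is defined elsewhere):

    #include <cstddef>
    #include <cstdint>

    const uintptr_t kZapValueSketch = 0xdeadbeef;  // assumed marker value

    void ZapBlockSketch(uintptr_t* block, size_t size_in_bytes) {
      // Stamp the marker into every pointer-sized slot that fully fits.
      for (size_t s = 0; s + sizeof(uintptr_t) <= size_in_bytes;
           s += sizeof(uintptr_t)) {
        block[s / sizeof(uintptr_t)] = kZapValueSketch;
      }
    }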
(...skipping 52 matching lines...)
          ", used: %" V8_PTR_PREFIX "d"
          ", available: %%%d\n\n",
          capacity_, size_, static_cast<int>(pct*100));
 }
 #endif


 int MemoryAllocator::CodePageGuardStartOffset() {
   // We are guarding code pages: the first OS page after the header
   // will be protected as non-writable.
-  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
+  return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
 }


 int MemoryAllocator::CodePageGuardSize() {
-  return static_cast<int>(OS::CommitPageSize());
+  return static_cast<int>(base::OS::CommitPageSize());
 }


 int MemoryAllocator::CodePageAreaStartOffset() {
   // We are guarding code pages: the first OS page after the header
   // will be protected as non-writable.
   return CodePageGuardStartOffset() + CodePageGuardSize();
 }


 int MemoryAllocator::CodePageAreaEndOffset() {
   // We are guarding code pages: the last OS page will be protected as
   // non-writable.
-  return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
+  return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
 }


-bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
+bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
                                              Address start,
                                              size_t commit_size,
                                              size_t reserved_size) {
   // Commit page header (not executable).
   if (!vm->Commit(start,
                   CodePageGuardStartOffset(),
                   false)) {
     return false;
   }

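Taken together, the four CodePage* helpers above define the executable page layout: the header is rounded up to the first OS-page boundary, one non-writable guard page sits between header and code area, and the last OS page of the V8 page is a trailing guard. Plugging in assumed sizes makes the offsets concrete (4 KiB commit pages, a 1 MiB Page::kPageSize, and the header size are assumptions for the demo):

    #include <cstdio>

    int main() {
      const int kCommitPageSize = 4096;     // assumed OS commit page size
      const int kPageSize = 1024 * 1024;    // assumed V8 page size
      const int kObjectStartOffset = 256;   // assumed header size
      int guard_start = (kObjectStartOffset + kCommitPageSize - 1) /
                        kCommitPageSize * kCommitPageSize;          // 4096
      int guard_size = kCommitPageSize;                             // 4096
      int area_start = guard_start + guard_size;                    // 8192
      int area_end_offset = kPageSize - kCommitPageSize;            // 1044480
      std::printf("code area: [%d, %d)\n", area_start, area_end_offset);
      return 0;
    }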
(...skipping 77 matching lines...)
   while (iterator.has_next()) {
     heap()->isolate()->memory_allocator()->Free(iterator.next());
   }
   anchor_.set_next_page(&anchor_);
   anchor_.set_prev_page(&anchor_);
   accounting_stats_.Clear();
 }


 size_t PagedSpace::CommittedPhysicalMemory() {
-  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
+  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   size_t size = 0;
   PageIterator it(this);
   while (it.has_next()) {
     size += it.next()->CommittedPhysicalMemory();
   }
   return size;
 }

(...skipping 589 matching lines...)
     if (!Commit()) return false;
   }
   ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
   ASSERT(new_capacity <= maximum_capacity_);
   ASSERT(new_capacity > capacity_);
   int pages_before = capacity_ / Page::kPageSize;
   int pages_after = new_capacity / Page::kPageSize;

   size_t delta = new_capacity - capacity_;

-  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
+  ASSERT(IsAligned(delta, base::OS::AllocateAlignment()));
   if (!heap()->isolate()->memory_allocator()->CommitBlock(
       start_ + capacity_, delta, executable())) {
     return false;
   }
   SetCapacity(new_capacity);
   NewSpacePage* last_page = anchor()->prev_page();
   ASSERT(last_page != anchor());
   for (int i = pages_before; i < pages_after; i++) {
     Address page_address = start_ + i * Page::kPageSize;
     NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
                                                       page_address,
                                                       this);
     new_page->InsertAfter(last_page);
     Bitmap::Clear(new_page);
     // Duplicate the flags that was set on the old page.
     new_page->SetFlags(last_page->GetFlags(),
                        NewSpacePage::kCopyOnFlipFlagsMask);
     last_page = new_page;
   }
   return true;
 }


 bool SemiSpace::ShrinkTo(int new_capacity) {
   ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
   ASSERT(new_capacity >= initial_capacity_);
   ASSERT(new_capacity < capacity_);
   if (is_committed()) {
     size_t delta = capacity_ - new_capacity;
-    ASSERT(IsAligned(delta, OS::AllocateAlignment()));
+    ASSERT(IsAligned(delta, base::OS::AllocateAlignment()));

     MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
     if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
       return false;
     }

     int pages_after = new_capacity / Page::kPageSize;
     NewSpacePage* new_last_page =
         NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
     new_last_page->set_next_page(anchor());
(...skipping 345 matching lines...)

 void NewSpace::RecordPromotion(HeapObject* obj) {
   InstanceType type = obj->map()->instance_type();
   ASSERT(0 <= type && type <= LAST_TYPE);
   promoted_histogram_[type].increment_number(1);
   promoted_histogram_[type].increment_bytes(obj->Size());
 }


 size_t NewSpace::CommittedPhysicalMemory() {
-  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
+  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   size_t size = to_space_.CommittedPhysicalMemory();
   if (from_space_.is_committed()) {
     size += from_space_.CommittedPhysicalMemory();
   }
   return size;
 }


 // -----------------------------------------------------------------------------
(...skipping 71 matching lines...)
   }
 }


 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
   intptr_t free_bytes = 0;
   if (category->top() != NULL) {
     // This is safe (not going to deadlock) since Concatenate operations
     // are never performed on the same free lists at the same time in
     // reverse order.
-    LockGuard<Mutex> target_lock_guard(mutex());
-    LockGuard<Mutex> source_lock_guard(category->mutex());
+    base::LockGuard<base::Mutex> target_lock_guard(mutex());
+    base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
     ASSERT(category->end_ != NULL);
     free_bytes = category->available();
     if (end_ == NULL) {
       end_ = category->end();
     } else {
       category->end()->set_next(top());
     }
     set_top(category->top());
     base::NoBarrier_Store(&top_, category->top_);
     available_ += category->available();
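The deadlock argument in the comment above rests on an ordering invariant: Concatenate is never run on the same pair of free lists in both directions at once, so taking the target mutex before the source mutex cannot form a cycle. Where such an invariant cannot be promised, the standard library can impose an order itself; a sketch with std::scoped_lock (C++17), which avoids deadlock regardless of call direction:

    #include <mutex>

    struct FreeListCategorySketch {
      std::mutex mutex;

      void Concatenate(FreeListCategorySketch* source) {
        // Locks both mutexes without deadlock even if another thread runs
        // source->Concatenate(this) concurrently.
        std::scoped_lock guard(mutex, source->mutex);
        // ... splice source's entries into this list ...
      }
    };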
(...skipping 817 matching lines...)
         heap()->fixed_array_map();
     reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
   }

   heap()->incremental_marking()->OldSpaceStep(object_size);
   return object;
 }


 size_t LargeObjectSpace::CommittedPhysicalMemory() {
-  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
+  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
   size_t size = 0;
   LargePage* current = first_page_;
   while (current != NULL) {
     size += current->CommittedPhysicalMemory();
     current = current->next_page();
   }
   return size;
 }

(...skipping 197 matching lines...)
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }

 #endif  // DEBUG

 } }  // namespace v8::internal