Index: src/spaces.cc
diff --git a/src/spaces.cc b/src/spaces.cc
index 194f75dfbfa56e0c3498cb83f63ade07df0596eb..ab97f9d289ac690cd78e0a0d699f0fac20035623 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -4,11 +4,11 @@
 
 #include "src/v8.h"
 
+#include "src/base/platform/platform.h"
 #include "src/full-codegen.h"
 #include "src/macro-assembler.h"
 #include "src/mark-compact.h"
 #include "src/msan.h"
-#include "src/platform.h"
 
 namespace v8 {
 namespace internal {
@@ -126,7 +126,7 @@ bool CodeRange::SetUp(size_t requested) {
   }
 
   ASSERT(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
-  code_range_ = new VirtualMemory(requested);
+  code_range_ = new base::VirtualMemory(requested);
   CHECK(code_range_ != NULL);
   if (!code_range_->IsReserved()) {
     delete code_range_;
@@ -301,7 +301,8 @@ void MemoryAllocator::TearDown() {
 bool MemoryAllocator::CommitMemory(Address base,
                                    size_t size,
                                    Executability executable) {
-  if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) {
+  if (!base::VirtualMemory::CommitRegion(base, size,
+                                         executable == EXECUTABLE)) {
     return false;
   }
   UpdateAllocatedSpaceLimits(base, base + size);
@@ -309,7 +310,7 @@ bool MemoryAllocator::CommitMemory(Address base,
 }
 
 
-void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
+void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
   ASSERT(reservation->IsReserved());
@@ -355,7 +356,7 @@ void MemoryAllocator::FreeMemory(Address base,
   ASSERT(executable == NOT_EXECUTABLE ||
          isolate_->code_range() == NULL ||
          !isolate_->code_range()->valid());
-  bool result = VirtualMemory::ReleaseRegion(base, size);
+  bool result = base::VirtualMemory::ReleaseRegion(base, size);
   USE(result);
   ASSERT(result);
 }
@@ -364,8 +365,8 @@ void MemoryAllocator::FreeMemory(Address base,
 
 Address MemoryAllocator::ReserveAlignedMemory(size_t size,
                                               size_t alignment,
-                                              VirtualMemory* controller) {
-  VirtualMemory reservation(size, alignment);
+                                              base::VirtualMemory* controller) {
+  base::VirtualMemory reservation(size, alignment);
 
   if (!reservation.IsReserved()) return NULL;
   size_ += reservation.size();
@@ -376,13 +377,11 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size,
 }
 
 
-Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
-                                               size_t commit_size,
-                                               size_t alignment,
-                                               Executability executable,
-                                               VirtualMemory* controller) {
+Address MemoryAllocator::AllocateAlignedMemory(
+    size_t reserve_size, size_t commit_size, size_t alignment,
+    Executability executable, base::VirtualMemory* controller) {
   ASSERT(commit_size <= reserve_size);
-  VirtualMemory reservation;
+  base::VirtualMemory reservation;
   Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
   if (base == NULL) return NULL;
 
@@ -511,9 +510,10 @@ bool MemoryChunk::CommitArea(size_t requested) {
   size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
                       MemoryAllocator::CodePageGuardSize() : 0;
   size_t header_size = area_start() - address() - guard_size;
-  size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
+  size_t commit_size =
+      RoundUp(header_size + requested, base::OS::CommitPageSize());
   size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
-                                  OS::CommitPageSize());
+                                  base::OS::CommitPageSize());
 
   if (commit_size > committed_size) {
     // Commit size should be less or equal than the reserved size.
@@ -587,7 +587,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
   size_t chunk_size;
   Heap* heap = isolate_->heap();
   Address base = NULL;
-  VirtualMemory reservation;
+  base::VirtualMemory reservation;
   Address area_start = NULL;
   Address area_end = NULL;
 
@@ -623,7 +623,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
 
   if (executable == EXECUTABLE) {
     chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
-                         OS::CommitPageSize()) + CodePageGuardSize();
+                         base::OS::CommitPageSize()) + CodePageGuardSize();
 
     // Check executable memory limit.
     if (size_executable_ + chunk_size > capacity_executable_) {
@@ -635,7 +635,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
 
     // Size of header (not executable) plus area (executable).
     size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
-                                 OS::CommitPageSize());
+                                 base::OS::CommitPageSize());
     // Allocate executable memory either from code range or from the
     // OS.
     if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
@@ -668,9 +668,9 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
     area_end = area_start + commit_area_size;
   } else {
     chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
-                         OS::CommitPageSize());
+                         base::OS::CommitPageSize());
     size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
-                                 commit_area_size, OS::CommitPageSize());
+                                 commit_area_size, base::OS::CommitPageSize());
     base = AllocateAlignedMemory(chunk_size,
                                  commit_size,
                                  MemoryChunk::kAlignment,
@@ -757,7 +757,7 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
   delete chunk->slots_buffer();
   delete chunk->skip_list();
 
-  VirtualMemory* reservation = chunk->reserved_memory();
+  base::VirtualMemory* reservation = chunk->reserved_memory();
   if (reservation->IsReserved()) {
     FreeMemory(reservation, chunk->executable());
   } else {
@@ -783,7 +783,7 @@ bool MemoryAllocator::CommitBlock(Address start,
 
 
 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
-  if (!VirtualMemory::UncommitRegion(start, size)) return false;
+  if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
   return true;
 }
@@ -856,12 +856,12 @@ void MemoryAllocator::ReportStatistics() {
 int MemoryAllocator::CodePageGuardStartOffset() {
   // We are guarding code pages: the first OS page after the header
   // will be protected as non-writable.
-  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
+  return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
 }
 
 
 int MemoryAllocator::CodePageGuardSize() {
-  return static_cast<int>(OS::CommitPageSize());
+  return static_cast<int>(base::OS::CommitPageSize());
 }
 
 
@@ -875,11 +875,11 @@ int MemoryAllocator::CodePageAreaStartOffset() {
 int MemoryAllocator::CodePageAreaEndOffset() {
   // We are guarding code pages: the last OS page will be protected as
   // non-writable.
-  return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
+  return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
 }
 
 
-bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
+bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
                                              Address start,
                                              size_t commit_size,
                                              size_t reserved_size) {
@@ -977,7 +977,7 @@ void PagedSpace::TearDown() {
 
 
 size_t PagedSpace::CommittedPhysicalMemory() {
-  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
+  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   size_t size = 0;
   PageIterator it(this);
@@ -1587,7 +1587,7 @@ bool SemiSpace::GrowTo(int new_capacity) {
 
   size_t delta = new_capacity - capacity_;
 
-  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
+  ASSERT(IsAligned(delta, base::OS::AllocateAlignment()));
   if (!heap()->isolate()->memory_allocator()->CommitBlock(
       start_ + capacity_, delta, executable())) {
     return false;
@@ -1617,7 +1617,7 @@ bool SemiSpace::ShrinkTo(int new_capacity) {
   ASSERT(new_capacity < capacity_);
   if (is_committed()) {
     size_t delta = capacity_ - new_capacity;
-    ASSERT(IsAligned(delta, OS::AllocateAlignment()));
+    ASSERT(IsAligned(delta, base::OS::AllocateAlignment()));
 
     MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
     if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
@@ -1983,7 +1983,7 @@ void NewSpace::RecordPromotion(HeapObject* obj) {
 
 
 size_t NewSpace::CommittedPhysicalMemory() {
-  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
+  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   size_t size = to_space_.CommittedPhysicalMemory();
   if (from_space_.is_committed()) {
@@ -2075,8 +2075,8 @@ intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
     // This is safe (not going to deadlock) since Concatenate operations
     // are never performed on the same free lists at the same time in
    // reverse order.
-    LockGuard<Mutex> target_lock_guard(mutex());
-    LockGuard<Mutex> source_lock_guard(category->mutex());
+    base::LockGuard<base::Mutex> target_lock_guard(mutex());
+    base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
     ASSERT(category->end_ != NULL);
     free_bytes = category->available();
     if (end_ == NULL) {
@@ -2914,7 +2914,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
 
 
 size_t LargeObjectSpace::CommittedPhysicalMemory() {
-  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
+  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
   size_t size = 0;
   LargePage* current = first_page_;
   while (current != NULL) {
|