Chromium Code Reviews

Unified Diff: src/spaces.cc

Issue 7389008: Make Win64 compile. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/gc
Patch Set: Created 9 years, 5 months ago
Index: src/spaces.cc
diff --git a/src/spaces.cc b/src/spaces.cc
index 18f7c1c48b77c19dd1f80c90fcc328d6ef856890..27567100caca8fc2146d969801c2261572dfa505 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -160,7 +160,7 @@ bool CodeRange::Setup(const size_t requested) {
Address aligned_base =
RoundUp(reinterpret_cast<Address>(code_range_->address()),
MemoryChunk::kAlignment);
- int size = code_range_->size() - (aligned_base - base);
+ size_t size = code_range_->size() - (aligned_base - base);
allocation_list_.Add(FreeBlock(aligned_base, size));
current_allocation_block_index_ = 0;
return true;
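
For context on this chunk: on Win64 (an LLP64 target) size_t is 64 bits while int stays 32 bits, so the old declaration narrows a size_t expression into an int, and MSVC flags that as warning C4267, which a warnings-as-errors build turns into a compile failure. A minimal standalone sketch of the before/after, with RegionSize() as a hypothetical stand-in for code_range_->size():

    #include <cstddef>

    // Hypothetical stand-in for code_range_->size(): a size_t, 64-bit on Win64.
    static size_t RegionSize() { return 1u << 20; }

    int main() {
      // Before the patch: the size_t result silently narrows to int.
      // int size = RegionSize();   // MSVC warning C4267, an error under /WX
      // After the patch: keep the full-width type instead of narrowing.
      size_t size = RegionSize();
      return size == 0;
    }
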
@@ -318,16 +318,21 @@ Address MemoryAllocator::ReserveAlignedMemory(const size_t requested,
size_t alignment,
size_t* allocated_size) {
ASSERT(IsAligned(alignment, OS::AllocateAlignment()));
- if (size_ + requested > capacity_) return NULL;
+ if (size_ + requested > capacity_) {
+ return NULL;
+ }
Erik Corry 2011/07/15 21:37:33 Unneeded change.
Lasse Reichstein 2011/08/01 12:40:33 I added it in order to be able to make a breakpoint…
- size_t allocated = RoundUp(requested + alignment, OS::AllocateAlignment());
+ size_t allocated = RoundUp(requested + alignment,
+ static_cast<int>(OS::AllocateAlignment()));
Erik Corry 2011/07/15 21:37:33 RoundUp takes a size_t and an int? Perhaps that's…
Lasse Reichstein 2011/08/01 12:40:33 It's taking intptr_t for its second argument, actually.
Address base = reinterpret_cast<Address>(
VirtualMemory::ReserveRegion(allocated));
Address end = base + allocated;
- if (base == 0) return NULL;
+ if (base == 0) {
+ return NULL;
Lasse Reichstein 2011/08/01 12:40:33 reverted.
+ }
Address aligned_base = RoundUp(base, alignment);
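
For context on the exchange above: if RoundUp's second parameter is intptr_t, as Lasse says, then passing it the unsigned size_t result of OS::AllocateAlignment() presumably drew a signed/unsigned conversion warning in the Win64 build, and the explicit cast silences it. A sketch of a RoundUp along those lines (an assumption for illustration, not V8's exact definition):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Sketch, not V8's code: second parameter is intptr_t, per the review
    // thread. m must be a power of two.
    template <typename T>
    static T RoundUp(T x, intptr_t m) {
      return static_cast<T>((static_cast<intptr_t>(x) + m - 1) & -m);
    }

    int main() {
      size_t requested = 12345;
      size_t alignment = 4096;  // stand-in for OS::AllocateAlignment()
      // Passing a raw size_t where intptr_t is expected mixes signedness;
      // the patch narrows explicitly at the call site instead.
      size_t allocated = RoundUp(requested + alignment,
                                 static_cast<int>(alignment));
      std::printf("%zu\n", allocated);  // prints 20480
      return 0;
    }
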
@@ -515,7 +520,8 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
#ifdef DEBUG
ZapBlock(base, chunk_size);
#endif
- isolate_->counters()->memory_allocated()->Increment(chunk_size);
+ isolate_->counters()->memory_allocated()->
+ Increment(static_cast<int>(chunk_size));
LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
if (owner != NULL) {
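
The counter change is the complementary case: here the receiving API is fixed, so the 64-bit chunk_size has to be narrowed explicitly at the call site rather than by widening the variable. A sketch, with Counter as a hypothetical simplification of V8's StatsCounter (the int parameter is an assumption):

    #include <cstddef>

    // Hypothetical simplification of a stats counter whose delta is an int.
    struct Counter {
      int value;
      void Increment(int delta) { value += delta; }
    };

    int main() {
      Counter memory_allocated = {0};
      size_t chunk_size = 4096;
      // memory_allocated.Increment(chunk_size);   // C4267 on Win64
      memory_allocated.Increment(static_cast<int>(chunk_size));  // explicit
      return memory_allocated.value == 4096 ? 0 : 1;
    }
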
@@ -662,8 +668,8 @@ PagedSpace::PagedSpace(Heap* heap,
* Page::kObjectAreaSize;
accounting_stats_.Clear();
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
+ allocation_info_.top = reinterpret_cast<Address>(static_cast<uintptr_t>(2));
Erik Corry 2011/07/15 21:37:33 2? Don't understand this at all, but it seems the…
Lasse Reichstein 2011/08/01 12:40:33 Sorry, leftover from debugging. Should be NULL again.
+ allocation_info_.limit = reinterpret_cast<Address>(static_cast<uintptr_t>(2));
anchor_.InitializeAsAnchor(this);
}
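
Whatever the value (the review above agrees it should revert to NULL), the cast pattern itself is the Win64-relevant part: reinterpret_cast of a 32-bit int literal straight to a 64-bit pointer draws MSVC warning C4312, while widening through uintptr_t first keeps the conversion quiet. A minimal sketch, with Address typedef'd as a byte pointer as in V8:

    #include <cstdint>

    typedef char* Address;  // V8's Address is a byte pointer

    int main() {
      // reinterpret_cast<Address>(2) would convert a 32-bit int straight to
      // a 64-bit pointer on Win64 (MSVC warning C4312); widening through
      // uintptr_t first makes the conversion warning-free.
      Address marker = reinterpret_cast<Address>(static_cast<uintptr_t>(2));
      return marker ? 0 : 1;
    }
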
@@ -856,7 +862,9 @@ bool NewSpace::Setup(int maximum_semispace_capacity) {
2 * maximum_semispace_capacity,
&size);
- if (base == NULL) return false;
+ if (base == NULL) {
Erik Corry 2011/07/15 21:37:33 Grrr.
Lasse Reichstein 2011/08/01 12:40:33 Curly-brace-ophobia is treatable.
+ return false;
+ }
chunk_base_ = base;
chunk_size_ = static_cast<uintptr_t>(size);
@@ -1839,8 +1847,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
int bytes_left = new_node_size - size_in_bytes;
ASSERT(bytes_left >= 0);
- int old_linear_size = owner_->limit() - owner_->top();
-
+ int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap. This also puts it back in the free list
// if it is big enough.
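
The old_linear_size changes in this and the next two chunks share one cause: a pointer difference has type ptrdiff_t, which is 64-bit on Win64, so assigning limit() - top() to an int draws MSVC warning C4244. The explicit cast is presumably safe here because a linear allocation area never spans more than a page. A standalone sketch:

    #include <cstddef>

    int main() {
      char area[64];
      char* top = area;
      char* limit = area + sizeof(area);
      // limit - top is a ptrdiff_t (64-bit on Win64); the implicit
      // conversion to int draws MSVC warning C4244.
      // int old_linear_size = limit - top;                  // warns on Win64
      int old_linear_size = static_cast<int>(limit - top);   // explicit instead
      return old_linear_size == 64 ? 0 : 1;
    }
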
@@ -1966,7 +1973,7 @@ void PagedSpace::PrepareForMarkCompact() {
// on the first allocation after the sweep.
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap.
- int old_linear_size = limit() - top();
+ int old_linear_size = static_cast<int>(limit() - top());
Free(top(), old_linear_size);
SetTop(NULL, NULL);
@@ -1996,7 +2003,7 @@ bool PagedSpace::ReserveSpace(int size_in_bytes) {
if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
if (new_area == NULL) return false;
- int old_linear_size = limit() - top();
+ int old_linear_size = static_cast<int>(limit() - top());
// Mark the old linear allocation area with a free space so it can be
// skipped when scanning the heap. This also puts it back in the free list
// if it is big enough.
@@ -2483,8 +2490,7 @@ bool LargeObjectSpace::Contains(HeapObject* object) {
bool owned = (chunk->owner() == this);
- SLOW_ASSERT(!owned
- || !FindObject(address)->IsFailure());
+ SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
return owned;
}
