Chromium Code Reviews

Unified Diff: src/spaces.cc

Issue 7389008: Make Win64 compile. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/gc
Patch Set: Addressed review comments (created 9 years, 4 months ago)
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 142 matching lines...) Expand 10 before | Expand all | Expand 10 after
     return false;
   }
 
   // We are sure that we have mapped a block of requested addresses.
   ASSERT(code_range_->size() == requested);
   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
   Address base = reinterpret_cast<Address>(code_range_->address());
   Address aligned_base =
       RoundUp(reinterpret_cast<Address>(code_range_->address()),
               MemoryChunk::kAlignment);
-  int size = code_range_->size() - (aligned_base - base);
+  size_t size = code_range_->size() - (aligned_base - base);
   allocation_list_.Add(FreeBlock(aligned_base, size));
   current_allocation_block_index_ = 0;
   return true;
 }
 
 
 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                        const FreeBlock* right) {
   // The entire point of CodeRange is that the difference between two
   // addresses in the range can be represented as a signed 32-bit int,
(...skipping 139 matching lines...) Expand 10 before | Expand all | Expand 10 after
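The `int` to `size_t` change in the hunk above is the core Win64 pattern of this patch: Windows x64 uses the LLP64 data model, where `int` and `long` stay 32 bits while pointers, `size_t`, and `ptrdiff_t` widen to 64 bits, so MSVC flags the implicit 64-to-32-bit narrowing (warning C4267) that GCC on LP64 Linux lets through by default. A minimal standalone sketch of the same shape, with hypothetical names rather than V8 code:

    #include <cstddef>

    // Range size minus an alignment offset, mirroring the fixed line above.
    size_t AlignedRangeSize(char* base, char* aligned_base, size_t range_size) {
      // int size = range_size - (aligned_base - base);  // C4267 on Win64: truncation
      size_t size = range_size - (aligned_base - base);  // operand widths now match
      return size;
    }

Note that the CompareFreeBlockAddress comment just above still holds: offsets between two addresses inside the code range fit a signed 32-bit int by design; it is absolute sizes and pointer differences in general that must be 64-bit clean.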
   }
 }
 
 
 Address MemoryAllocator::ReserveAlignedMemory(const size_t requested,
                                               size_t alignment,
                                               size_t* allocated_size) {
   ASSERT(IsAligned(alignment, OS::AllocateAlignment()));
   if (size_ + requested > capacity_) return NULL;
 
-  size_t allocated = RoundUp(requested + alignment, OS::AllocateAlignment());
+  size_t allocated = RoundUp(requested + alignment,
+                             static_cast<intptr_t>(OS::AllocateAlignment()));
 
   Address base = reinterpret_cast<Address>(
       VirtualMemory::ReserveRegion(allocated));
 
   Address end = base + allocated;
 
   if (base == 0) return NULL;
 
   Address aligned_base = RoundUp(base, alignment);
 
(...skipping 174 matching lines...) Expand 10 before | Expand all | Expand 10 after
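In ReserveAlignedMemory the second argument to `RoundUp` now goes through `static_cast<intptr_t>`. The exact signature of V8's `RoundUp` isn't visible in this hunk, but the pattern is the usual Win64 one: when a rounding helper and its caller disagree about operand width, MSVC flags the implicit conversion, and an explicit cast both silences it and documents the intended width. A sketch with a stand-in helper (`RoundUpTo` and `ReservationSize` are illustrative, not V8's actual template):

    #include <cstddef>
    #include <cstdint>

    // Stand-in rounding helper: round x up to a multiple of 'multiple'.
    template <typename T>
    T RoundUpTo(T x, intptr_t multiple) {
      return ((x + multiple - 1) / multiple) * multiple;
    }

    size_t ReservationSize(size_t requested, size_t alignment, int os_page_size) {
      // The cast keeps every operand pointer-width on Win64, leaving the
      // compiler no 32/64-bit mix to warn about.
      return RoundUpTo<size_t>(requested + alignment,
                               static_cast<intptr_t>(os_page_size));
    }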
                                     MemoryChunk::kAlignment,
                                     executable,
                                     &chunk_size);
 
     if (base == NULL) return NULL;
   }
 
 #ifdef DEBUG
   ZapBlock(base, chunk_size);
 #endif
-  isolate_->counters()->memory_allocated()->Increment(chunk_size);
+  isolate_->counters()->memory_allocated()->
+      Increment(static_cast<int>(chunk_size));
 
   LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
   if (owner != NULL) {
     ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
     PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
   }
 
   return MemoryChunk::Initialize(heap,
                                  base,
                                  chunk_size,
(...skipping 1303 matching lines...) Expand 10 before | Expand all | Expand 10 after
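Judging from the fix above, the `memory_allocated()` counter's `Increment` takes an `int` while `chunk_size` is a `size_t`, so Win64 needs the narrowing spelled out. A self-contained sketch of the pattern (the `Counter` class here is a hypothetical stand-in, not V8's counters API):

    #include <cstddef>

    // Hypothetical stand-in for an int-based stats counter.
    class Counter {
     public:
      void Increment(int delta) { value_ += delta; }
      int value() const { return value_; }
     private:
      int value_ = 0;
    };

    void CountChunk(Counter* counter, size_t chunk_size) {
      // counter->Increment(chunk_size);                  // C4267 on Win64
      counter->Increment(static_cast<int>(chunk_size));   // intentional narrowing
    }

The cast is safe in practice because individual chunk sizes stay far below 2 GB; the point of writing it out is to make that assumption visible at the call site.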
   int new_node_size = 0;
   FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   if (new_node == NULL) return NULL;
 
   available_ -= new_node_size;
   ASSERT(IsVeryLong() || available_ == SumFreeLists());
 
   int bytes_left = new_node_size - size_in_bytes;
   ASSERT(bytes_left >= 0);
 
-  int old_linear_size = owner_->limit() - owner_->top();
-
+  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
   // Mark the old linear allocation area with a free space map so it can be
   // skipped when scanning the heap. This also puts it back in the free list
   // if it is big enough.
   owner_->Free(owner_->top(), old_linear_size);
   owner_->heap()->incremental_marking()->Step(size_in_bytes - old_linear_size);
 
   const int kThreshold = IncrementalMarking::kAllocatedThreshold;
 
   // Memory in the linear allocation area is counted as allocated. We may free
   // a little of this again immediately - see below.
(...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after
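`owner_->limit() - owner_->top()` subtracts two `Address` values, and pointer subtraction yields `ptrdiff_t`, which is 64 bits on any 64-bit target; only MSVC warns about narrowing it into an `int` by default, hence the new explicit cast. A small sketch (the typedef mirrors V8's byte-pointer `Address`; the function name is made up):

    #include <cstdint>

    typedef uint8_t* Address;  // V8's Address is a byte pointer

    int LinearAreaSize(Address top, Address limit) {
      // int size = limit - top;              // 64-bit ptrdiff_t -> int warns on Win64
      return static_cast<int>(limit - top);   // the linear area is small; this fits
    }

The same cast appears twice more below, in PagedSpace::PrepareForMarkCompact and PagedSpace::ReserveSpace, for the same reason.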
   Address top = allocation_info_.top;
   return limit - top >= bytes;
 }
 
 
 void PagedSpace::PrepareForMarkCompact() {
   // We don't have a linear allocation area while sweeping. It will be restored
   // on the first allocation after the sweep.
   // Mark the old linear allocation area with a free space map so it can be
   // skipped when scanning the heap.
-  int old_linear_size = limit() - top();
+  int old_linear_size = static_cast<int>(limit() - top());
   Free(top(), old_linear_size);
   SetTop(NULL, NULL);
 
   // Stop lazy sweeping for the space.
   first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL);
 
   // Clear the free list before a full GC---it will be rebuilt afterward.
   free_list_.Reset();
 
   // Clear EVACUATED flag from all pages.
   PageIterator it(this);
   while (it.has_next()) {
     Page* page = it.next();
     page->ClearSwept();
   }
 }
 
 
 bool PagedSpace::ReserveSpace(int size_in_bytes) {
   ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize);
   ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
   Address current_top = allocation_info_.top;
   Address new_top = current_top + size_in_bytes;
   if (new_top <= allocation_info_.limit) return true;
 
   HeapObject* new_area = free_list_.Allocate(size_in_bytes);
   if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
   if (new_area == NULL) return false;
 
-  int old_linear_size = limit() - top();
+  int old_linear_size = static_cast<int>(limit() - top());
   // Mark the old linear allocation area with a free space so it can be
   // skipped when scanning the heap. This also puts it back in the free list
   // if it is big enough.
   Free(top(), old_linear_size);
 
   SetTop(new_area->address(), new_area->address() + size_in_bytes);
   Allocate(size_in_bytes);
   return true;
 }
 
 
 // You have to call this last, since the implementation from PagedSpace
 // doesn't know that memory was 'promised' to large object space.
 bool LargeObjectSpace::ReserveSpace(int bytes) {
   return heap()->OldGenerationSpaceAvailable() >= bytes;
 }
 
 
 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
   if (IsSweepingComplete()) return true;
 
-  int freed_bytes = 0;
+  intptr_t freed_bytes = 0;
   Page* last = last_unswept_page_->next_page();
   Page* p = first_unswept_page_;
   do {
     Page* next_page = p->next_page();
     // Evacuation candidates were swept by evacuator.
     if (!p->IsEvacuationCandidate() &&
         !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
         !p->WasSwept()) {
       freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
     }
(...skipping 444 matching lines...) Expand 10 before | Expand all | Expand 10 after
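`freed_bytes` goes the other way: instead of narrowing at a call site, it is widened to `intptr_t`, since it accumulates `SweepConservatively` results over many pages and presumably feeds a comparison against the `intptr_t bytes_to_sweep` parameter in the elided lines. Keeping the accumulator pointer-sized avoids both overflow and a width mismatch. A schematic sketch of that design choice (array-based stand-in, not the real page walk):

    #include <cstdint>

    // Sum per-page freed byte counts into a pointer-sized total.
    intptr_t TotalFreedBytes(const intptr_t* page_freed, int page_count) {
      intptr_t freed_bytes = 0;  // an int total could overflow past 2 GB swept
      for (int i = 0; i < page_count; ++i) {
        freed_bytes += page_freed[i];
      }
      return freed_bytes;
    }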
   heap()->FreeQueuedChunks();
 }
 
 
 bool LargeObjectSpace::Contains(HeapObject* object) {
   Address address = object->address();
   MemoryChunk* chunk = MemoryChunk::FromAddress(address);
 
   bool owned = (chunk->owner() == this);
 
-  SLOW_ASSERT(!owned
-              || !FindObject(address)->IsFailure());
+  SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
 
   return owned;
 }
 
 
 #ifdef DEBUG
 // We do not assume that the large object iterator works, because it depends
 // on the invariants we are checking during verification.
 void LargeObjectSpace::Verify() {
   for (LargePage* chunk = first_page_;
(...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after
   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       isolate->code_kind_statistics()[code->kind()] += code->Size();
     }
   }
 }
 #endif  // DEBUG
 
 } }  // namespace v8::internal
