Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 142 matching lines...) | |
| 153 return false; | 153 return false; |
| 154 } | 154 } |
| 155 | 155 |
| 156 // We are sure that we have mapped a block of requested addresses. | 156 // We are sure that we have mapped a block of requested addresses. |
| 157 ASSERT(code_range_->size() == requested); | 157 ASSERT(code_range_->size() == requested); |
| 158 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested)); | 158 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested)); |
| 159 Address base = reinterpret_cast<Address>(code_range_->address()); | 159 Address base = reinterpret_cast<Address>(code_range_->address()); |
| 160 Address aligned_base = | 160 Address aligned_base = |
| 161 RoundUp(reinterpret_cast<Address>(code_range_->address()), | 161 RoundUp(reinterpret_cast<Address>(code_range_->address()), |
| 162 MemoryChunk::kAlignment); | 162 MemoryChunk::kAlignment); |
| 163 int size = code_range_->size() - (aligned_base - base); | 163 size_t size = code_range_->size() - (aligned_base - base); |
| 164 allocation_list_.Add(FreeBlock(aligned_base, size)); | 164 allocation_list_.Add(FreeBlock(aligned_base, size)); |
| 165 current_allocation_block_index_ = 0; | 165 current_allocation_block_index_ = 0; |
| 166 return true; | 166 return true; |
| 167 } | 167 } |
| 168 | 168 |
| 169 | 169 |
| 170 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, | 170 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, |
| 171 const FreeBlock* right) { | 171 const FreeBlock* right) { |
| 172 // The entire point of CodeRange is that the difference between two | 172 // The entire point of CodeRange is that the difference between two |
| 173 // addresses in the range can be represented as a signed 32-bit int, | 173 // addresses in the range can be represented as a signed 32-bit int, |
| (...skipping 137 matching lines...) | |
| 311 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); | 311 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); |
| 312 VirtualMemory::ReleaseRegion(base, size); | 312 VirtualMemory::ReleaseRegion(base, size); |
| 313 } | 313 } |
| 314 } | 314 } |
| 315 | 315 |
| 316 | 316 |
| 317 Address MemoryAllocator::ReserveAlignedMemory(const size_t requested, | 317 Address MemoryAllocator::ReserveAlignedMemory(const size_t requested, |
| 318 size_t alignment, | 318 size_t alignment, |
| 319 size_t* allocated_size) { | 319 size_t* allocated_size) { |
| 320 ASSERT(IsAligned(alignment, OS::AllocateAlignment())); | 320 ASSERT(IsAligned(alignment, OS::AllocateAlignment())); |
| 321 if (size_ + requested > capacity_) return NULL; | 321 if (size_ + requested > capacity_) { |
| 322 return NULL; | |
| 323 } | |
Erik Corry (2011/07/15 21:37:33): Unneeded change.
Lasse Reichstein (2011/08/01 12:40:33): I added it in order to be able to make a breakpoint…
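The point under discussion is mechanical, but a small illustration may help: with the braced form the return statement sits on its own source line, so a debugger line breakpoint can target the early-return path specifically. The function and variable names below are hypothetical, not taken from the patch.

```cpp
#include <cstddef>
#include <new>

// Hypothetical sketch, not V8 code: the two forms discussed above.
void* ReserveOneLine(size_t requested, size_t capacity) {
  if (requested > capacity) return NULL;  // breakpoint covers the whole statement
  return ::operator new(requested);
}

void* ReserveBraced(size_t requested, size_t capacity) {
  if (requested > capacity) {
    return NULL;  // a line breakpoint here fires only on the early return
  }
  return ::operator new(requested);
}
```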
| 322 | 324 |
| 323 size_t allocated = RoundUp(requested + alignment, OS::AllocateAlignment()); | 325 size_t allocated = RoundUp(requested + alignment, |
| 326 static_cast<int>(OS::AllocateAlignment())); | |
Erik Corry (2011/07/15 21:37:33): RoundUp takes a size_t and an int? Perhaps that's…
Lasse Reichstein (2011/08/01 12:40:33): It's taking intptr_t for its second argument, actually…
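For context on the cast being discussed, here is a rough, hypothetical approximation of a RoundUp helper with a signed (intptr_t) second parameter, as Lasse describes; it is not the actual V8 declaration. The explicit cast at the call site avoids an implicit signed/unsigned conversion when the alignment value comes from an unsigned source.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Hypothetical approximation of the helper under discussion (not V8's code).
size_t RoundUp(size_t value, intptr_t alignment) {
  assert(alignment > 0 && (alignment & (alignment - 1)) == 0);  // power of two
  size_t a = static_cast<size_t>(alignment);
  return (value + a - 1) & ~(a - 1);
}

// Usage mirroring the patched call site (os_alignment assumed unsigned):
// size_t allocated =
//     RoundUp(requested + alignment, static_cast<intptr_t>(os_alignment));
```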
| 324 | 327 |
| 325 Address base = reinterpret_cast<Address>( | 328 Address base = reinterpret_cast<Address>( |
| 326 VirtualMemory::ReserveRegion(allocated)); | 329 VirtualMemory::ReserveRegion(allocated)); |
| 327 | 330 |
| 328 Address end = base + allocated; | 331 Address end = base + allocated; |
| 329 | 332 |
| 330 if (base == 0) return NULL; | 333 if (base == 0) { |
| 334 return NULL; | |
Lasse Reichstein (2011/08/01 12:40:33): Reverted.
| 335 } | |
| 331 | 336 |
| 332 Address aligned_base = RoundUp(base, alignment); | 337 Address aligned_base = RoundUp(base, alignment); |
| 333 | 338 |
| 334 ASSERT(aligned_base + requested <= base + allocated); | 339 ASSERT(aligned_base + requested <= base + allocated); |
| 335 | 340 |
| 336 // The difference between re-aligned base address and base address is | 341 // The difference between re-aligned base address and base address is |
| 337 // multiple of OS::AllocateAlignment(). | 342 // multiple of OS::AllocateAlignment(). |
| 338 if (aligned_base != base) { | 343 if (aligned_base != base) { |
| 339 ASSERT(aligned_base > base); | 344 ASSERT(aligned_base > base); |
| 340 // TODO(gc) check result of operation? | 345 // TODO(gc) check result of operation? |
| (...skipping 167 matching lines...) | |
| 508 MemoryChunk::kAlignment, | 513 MemoryChunk::kAlignment, |
| 509 executable, | 514 executable, |
| 510 &chunk_size); | 515 &chunk_size); |
| 511 | 516 |
| 512 if (base == NULL) return NULL; | 517 if (base == NULL) return NULL; |
| 513 } | 518 } |
| 514 | 519 |
| 515 #ifdef DEBUG | 520 #ifdef DEBUG |
| 516 ZapBlock(base, chunk_size); | 521 ZapBlock(base, chunk_size); |
| 517 #endif | 522 #endif |
| 518 isolate_->counters()->memory_allocated()->Increment(chunk_size); | 523 isolate_->counters()->memory_allocated()-> |
| 524 Increment(static_cast<int>(chunk_size)); | |
| 519 | 525 |
| 520 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); | 526 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); |
| 521 if (owner != NULL) { | 527 if (owner != NULL) { |
| 522 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); | 528 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); |
| 523 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); | 529 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); |
| 524 } | 530 } |
| 525 | 531 |
| 526 return MemoryChunk::Initialize(heap, | 532 return MemoryChunk::Initialize(heap, |
| 527 base, | 533 base, |
| 528 chunk_size, | 534 chunk_size, |
| (...skipping 126 matching lines...) | |
| 655 Executability executable) | 661 Executability executable) |
| 656 : Space(heap, id, executable), | 662 : Space(heap, id, executable), |
| 657 free_list_(this), | 663 free_list_(this), |
| 658 was_swept_conservatively_(false), | 664 was_swept_conservatively_(false), |
| 659 first_unswept_page_(Page::FromAddress(NULL)), | 665 first_unswept_page_(Page::FromAddress(NULL)), |
| 660 last_unswept_page_(Page::FromAddress(NULL)) { | 666 last_unswept_page_(Page::FromAddress(NULL)) { |
| 661 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) | 667 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) |
| 662 * Page::kObjectAreaSize; | 668 * Page::kObjectAreaSize; |
| 663 accounting_stats_.Clear(); | 669 accounting_stats_.Clear(); |
| 664 | 670 |
| 665 allocation_info_.top = NULL; | 671 allocation_info_.top = reinterpret_cast<Address>(static_cast<uintptr_t>(2)); |
Erik Corry (2011/07/15 21:37:33): 2? Don't understand this at all, but it seems the…
Lasse Reichstein (2011/08/01 12:40:33): Sorry, leftover from debugging. Should be NULL again.
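As background for the top/limit pair being reset here, a minimal sketch of a bump-pointer ("linear") allocation area follows; it is illustrative only and does not reproduce V8's actual AllocationInfo or AllocateRaw. Both pointers normally start out as NULL, which is why the literal 2 above is flagged as a debugging leftover.

```cpp
#include <cstddef>
#include <cstdint>

// Illustrative sketch, not V8 code.
struct AllocationInfo {
  uint8_t* top;    // next free byte in the current linear area
  uint8_t* limit;  // one past the last usable byte of the area
};

// Bump-pointer allocation: succeeds only while top stays below limit.
inline uint8_t* LinearAllocate(AllocationInfo* info, size_t size_in_bytes) {
  if (info->top == NULL ||
      static_cast<size_t>(info->limit - info->top) < size_in_bytes) {
    return NULL;  // caller falls back to the free list / slow path
  }
  uint8_t* result = info->top;
  info->top += size_in_bytes;
  return result;
}
```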
| 666 allocation_info_.limit = NULL; | 672 allocation_info_.limit = reinterpret_cast<Address>(static_cast<uintptr_t>(2)); |
| 667 | 673 |
| 668 anchor_.InitializeAsAnchor(this); | 674 anchor_.InitializeAsAnchor(this); |
| 669 } | 675 } |
| 670 | 676 |
| 671 | 677 |
| 672 bool PagedSpace::Setup() { | 678 bool PagedSpace::Setup() { |
| 673 return true; | 679 return true; |
| 674 } | 680 } |
| 675 | 681 |
| 676 | 682 |
| (...skipping 172 matching lines...) | |
| 849 // this chunk must be a power of two and it must be aligned to its size. | 855 // this chunk must be a power of two and it must be aligned to its size. |
| 850 int initial_semispace_capacity = heap()->InitialSemiSpaceSize(); | 856 int initial_semispace_capacity = heap()->InitialSemiSpaceSize(); |
| 851 | 857 |
| 852 size_t size = 0; | 858 size_t size = 0; |
| 853 Address base = | 859 Address base = |
| 854 heap()->isolate()->memory_allocator()->ReserveAlignedMemory( | 860 heap()->isolate()->memory_allocator()->ReserveAlignedMemory( |
| 855 2 * maximum_semispace_capacity, | 861 2 * maximum_semispace_capacity, |
| 856 2 * maximum_semispace_capacity, | 862 2 * maximum_semispace_capacity, |
| 857 &size); | 863 &size); |
| 858 | 864 |
| 859 if (base == NULL) return false; | 865 if (base == NULL) { |
Erik Corry (2011/07/15 21:37:33): Grrr.
Lasse Reichstein (2011/08/01 12:40:33): Curly-brace-ophobia is treatable.
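The exchange above is about brace style, but the surrounding hunks repeat a more substantive pattern worth spelling out: expressions like limit() - top() are pointer differences, which are 64 bits wide on x64, so storing them in an int now takes an explicit static_cast. A hedged, self-contained illustration with hypothetical names, not the V8 helpers:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

typedef uint8_t* Address;

// Illustrative only: narrowing a pointer difference to int, as the patch does
// for the linear allocation area. The cast is assumed safe because the area
// is bounded by the page size, so the difference always fits in 32 bits.
int LinearAreaSize(Address top, Address limit) {
  ptrdiff_t diff = limit - top;            // 64-bit on x64 targets
  assert(diff >= 0 && diff <= INT32_MAX);  // sanity check on that assumption
  return static_cast<int>(diff);
}
```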
| 866 return false; | |
| 867 } | |
| 860 | 868 |
| 861 chunk_base_ = base; | 869 chunk_base_ = base; |
| 862 chunk_size_ = static_cast<uintptr_t>(size); | 870 chunk_size_ = static_cast<uintptr_t>(size); |
| 863 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_)); | 871 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_)); |
| 864 | 872 |
| 865 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity); | 873 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity); |
| 866 ASSERT(IsPowerOf2(maximum_semispace_capacity)); | 874 ASSERT(IsPowerOf2(maximum_semispace_capacity)); |
| 867 | 875 |
| 868 // Allocate and setup the histogram arrays if necessary. | 876 // Allocate and setup the histogram arrays if necessary. |
| 869 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 877 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
| (...skipping 962 matching lines...) | |
| 1832 int new_node_size = 0; | 1840 int new_node_size = 0; |
| 1833 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); | 1841 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); |
| 1834 if (new_node == NULL) return NULL; | 1842 if (new_node == NULL) return NULL; |
| 1835 | 1843 |
| 1836 available_ -= new_node_size; | 1844 available_ -= new_node_size; |
| 1837 ASSERT(IsVeryLong() || available_ == SumFreeLists()); | 1845 ASSERT(IsVeryLong() || available_ == SumFreeLists()); |
| 1838 | 1846 |
| 1839 int bytes_left = new_node_size - size_in_bytes; | 1847 int bytes_left = new_node_size - size_in_bytes; |
| 1840 ASSERT(bytes_left >= 0); | 1848 ASSERT(bytes_left >= 0); |
| 1841 | 1849 |
| 1842 int old_linear_size = owner_->limit() - owner_->top(); | 1850 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); |
| 1843 | |
| 1844 // Mark the old linear allocation area with a free space map so it can be | 1851 // Mark the old linear allocation area with a free space map so it can be |
| 1845 // skipped when scanning the heap. This also puts it back in the free list | 1852 // skipped when scanning the heap. This also puts it back in the free list |
| 1846 // if it is big enough. | 1853 // if it is big enough. |
| 1847 owner_->Free(owner_->top(), old_linear_size); | 1854 owner_->Free(owner_->top(), old_linear_size); |
| 1848 owner_->heap()->incremental_marking()->Step(size_in_bytes - old_linear_size); | 1855 owner_->heap()->incremental_marking()->Step(size_in_bytes - old_linear_size); |
| 1849 | 1856 |
| 1850 const int kThreshold = IncrementalMarking::kAllocatedThreshold; | 1857 const int kThreshold = IncrementalMarking::kAllocatedThreshold; |
| 1851 | 1858 |
| 1852 // Memory in the linear allocation area is counted as allocated. We may free | 1859 // Memory in the linear allocation area is counted as allocated. We may free |
| 1853 // a little of this again immediately - see below. | 1860 // a little of this again immediately - see below. |
| (...skipping 105 matching lines...) | |
| 1959 Address top = allocation_info_.top; | 1966 Address top = allocation_info_.top; |
| 1960 return limit - top >= bytes; | 1967 return limit - top >= bytes; |
| 1961 } | 1968 } |
| 1962 | 1969 |
| 1963 | 1970 |
| 1964 void PagedSpace::PrepareForMarkCompact() { | 1971 void PagedSpace::PrepareForMarkCompact() { |
| 1965 // We don't have a linear allocation area while sweeping. It will be restored | 1972 // We don't have a linear allocation area while sweeping. It will be restored |
| 1966 // on the first allocation after the sweep. | 1973 // on the first allocation after the sweep. |
| 1967 // Mark the old linear allocation area with a free space map so it can be | 1974 // Mark the old linear allocation area with a free space map so it can be |
| 1968 // skipped when scanning the heap. | 1975 // skipped when scanning the heap. |
| 1969 int old_linear_size = limit() - top(); | 1976 int old_linear_size = static_cast<int>(limit() - top()); |
| 1970 Free(top(), old_linear_size); | 1977 Free(top(), old_linear_size); |
| 1971 SetTop(NULL, NULL); | 1978 SetTop(NULL, NULL); |
| 1972 | 1979 |
| 1973 // Stop lazy sweeping for the space. | 1980 // Stop lazy sweeping for the space. |
| 1974 first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL); | 1981 first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL); |
| 1975 | 1982 |
| 1976 // Clear the free list before a full GC---it will be rebuilt afterward. | 1983 // Clear the free list before a full GC---it will be rebuilt afterward. |
| 1977 free_list_.Reset(); | 1984 free_list_.Reset(); |
| 1978 | 1985 |
| 1979 // Clear EVACUATED flag from all pages. | 1986 // Clear EVACUATED flag from all pages. |
| 1980 PageIterator it(this); | 1987 PageIterator it(this); |
| 1981 while (it.has_next()) { | 1988 while (it.has_next()) { |
| 1982 Page* page = it.next(); | 1989 Page* page = it.next(); |
| 1983 page->ClearSwept(); | 1990 page->ClearSwept(); |
| 1984 } | 1991 } |
| 1985 } | 1992 } |
| 1986 | 1993 |
| 1987 | 1994 |
| 1988 bool PagedSpace::ReserveSpace(int size_in_bytes) { | 1995 bool PagedSpace::ReserveSpace(int size_in_bytes) { |
| 1989 ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize); | 1996 ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize); |
| 1990 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes)); | 1997 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes)); |
| 1991 Address current_top = allocation_info_.top; | 1998 Address current_top = allocation_info_.top; |
| 1992 Address new_top = current_top + size_in_bytes; | 1999 Address new_top = current_top + size_in_bytes; |
| 1993 if (new_top <= allocation_info_.limit) return true; | 2000 if (new_top <= allocation_info_.limit) return true; |
| 1994 | 2001 |
| 1995 HeapObject* new_area = free_list_.Allocate(size_in_bytes); | 2002 HeapObject* new_area = free_list_.Allocate(size_in_bytes); |
| 1996 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); | 2003 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); |
| 1997 if (new_area == NULL) return false; | 2004 if (new_area == NULL) return false; |
| 1998 | 2005 |
| 1999 int old_linear_size = limit() - top(); | 2006 int old_linear_size = static_cast<int>(limit() - top()); |
| 2000 // Mark the old linear allocation area with a free space so it can be | 2007 // Mark the old linear allocation area with a free space so it can be |
| 2001 // skipped when scanning the heap. This also puts it back in the free list | 2008 // skipped when scanning the heap. This also puts it back in the free list |
| 2002 // if it is big enough. | 2009 // if it is big enough. |
| 2003 Free(top(), old_linear_size); | 2010 Free(top(), old_linear_size); |
| 2004 | 2011 |
| 2005 SetTop(new_area->address(), new_area->address() + size_in_bytes); | 2012 SetTop(new_area->address(), new_area->address() + size_in_bytes); |
| 2006 Allocate(size_in_bytes); | 2013 Allocate(size_in_bytes); |
| 2007 return true; | 2014 return true; |
| 2008 } | 2015 } |
| 2009 | 2016 |
| (...skipping 466 matching lines...) | |
| 2476 heap()->FreeQueuedChunks(); | 2483 heap()->FreeQueuedChunks(); |
| 2477 } | 2484 } |
| 2478 | 2485 |
| 2479 | 2486 |
| 2480 bool LargeObjectSpace::Contains(HeapObject* object) { | 2487 bool LargeObjectSpace::Contains(HeapObject* object) { |
| 2481 Address address = object->address(); | 2488 Address address = object->address(); |
| 2482 MemoryChunk* chunk = MemoryChunk::FromAddress(address); | 2489 MemoryChunk* chunk = MemoryChunk::FromAddress(address); |
| 2483 | 2490 |
| 2484 bool owned = (chunk->owner() == this); | 2491 bool owned = (chunk->owner() == this); |
| 2485 | 2492 |
| 2486 SLOW_ASSERT(!owned | 2493 SLOW_ASSERT(!owned || !FindObject(address)->IsFailure()); |
| 2487 || !FindObject(address)->IsFailure()); | |
| 2488 | 2494 |
| 2489 return owned; | 2495 return owned; |
| 2490 } | 2496 } |
| 2491 | 2497 |
| 2492 | 2498 |
| 2493 #ifdef DEBUG | 2499 #ifdef DEBUG |
| 2494 // We do not assume that the large object iterator works, because it depends | 2500 // We do not assume that the large object iterator works, because it depends |
| 2495 // on the invariants we are checking during verification. | 2501 // on the invariants we are checking during verification. |
| 2496 void LargeObjectSpace::Verify() { | 2502 void LargeObjectSpace::Verify() { |
| 2497 for (LargePage* chunk = first_page_; | 2503 for (LargePage* chunk = first_page_; |
| (...skipping 72 matching lines...) | |
| 2570 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { | 2576 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { |
| 2571 if (obj->IsCode()) { | 2577 if (obj->IsCode()) { |
| 2572 Code* code = Code::cast(obj); | 2578 Code* code = Code::cast(obj); |
| 2573 isolate->code_kind_statistics()[code->kind()] += code->Size(); | 2579 isolate->code_kind_statistics()[code->kind()] += code->Size(); |
| 2574 } | 2580 } |
| 2575 } | 2581 } |
| 2576 } | 2582 } |
| 2577 #endif // DEBUG | 2583 #endif // DEBUG |
| 2578 | 2584 |
| 2579 } } // namespace v8::internal | 2585 } } // namespace v8::internal |