| OLD | NEW |
| 1 // Copyright 2006-2010 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 10 matching lines...) |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #ifndef V8_SPACES_INL_H_ | 28 #ifndef V8_SPACES_INL_H_ |
| 29 #define V8_SPACES_INL_H_ | 29 #define V8_SPACES_INL_H_ |
| 30 | 30 |
| 31 #include "isolate.h" |
| 31 #include "memory.h" | 32 #include "memory.h" |
| 32 #include "spaces.h" | 33 #include "spaces.h" |
| 33 | 34 |
| 34 namespace v8 { | 35 namespace v8 { |
| 35 namespace internal { | 36 namespace internal { |
| 36 | 37 |
| 37 | 38 |
| 38 // ----------------------------------------------------------------------------- | 39 // ----------------------------------------------------------------------------- |
| 39 // PageIterator | 40 // PageIterator |
| 40 | 41 |
| 41 bool PageIterator::has_next() { | 42 bool PageIterator::has_next() { |
| 42 return prev_page_ != stop_page_; | 43 return prev_page_ != stop_page_; |
| 43 } | 44 } |
| 44 | 45 |
| 45 | 46 |
| 46 Page* PageIterator::next() { | 47 Page* PageIterator::next() { |
| 47 ASSERT(has_next()); | 48 ASSERT(has_next()); |
| 48 prev_page_ = (prev_page_ == NULL) | 49 prev_page_ = (prev_page_ == NULL) |
| 49 ? space_->first_page_ | 50 ? space_->first_page_ |
| 50 : prev_page_->next_page(); | 51 : prev_page_->next_page(); |
| 51 return prev_page_; | 52 return prev_page_; |
| 52 } | 53 } |
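
The iterator above yields pages lazily, starting from the space's first page. A minimal usage sketch follows; the `PAGES_IN_USE` mode name and the object-area accessors are assumptions for illustration and do not appear in this hunk.

```cpp
// Hypothetical walk over a paged space using the iterator above.
// PAGES_IN_USE is an assumed iteration mode; only has_next()/next()
// are shown in this file.
void VisitPages(PagedSpace* space) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* page = it.next();  // non-NULL whenever has_next() was true
    // ... visit objects from page->ObjectAreaStart() up to
    //     page->AllocationTop() ...
  }
}
```
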
| 53 | 54 |
| 54 | 55 |
| 55 // ----------------------------------------------------------------------------- | 56 // ----------------------------------------------------------------------------- |
| 56 // Page | 57 // Page |
| 57 | 58 |
| 58 Page* Page::next_page() { | 59 Page* Page::next_page() { |
| 59 return MemoryAllocator::GetNextPage(this); | 60 return heap_->isolate()->memory_allocator()->GetNextPage(this); |
| 60 } | 61 } |
| 61 | 62 |
| 62 | 63 |
| 63 Address Page::AllocationTop() { | 64 Address Page::AllocationTop() { |
| 64 PagedSpace* owner = MemoryAllocator::PageOwner(this); | 65 PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this); |
| 65 return owner->PageAllocationTop(this); | 66 return owner->PageAllocationTop(this); |
| 66 } | 67 } |
| 67 | 68 |
| 68 | 69 |
| 69 Address Page::AllocationWatermark() { | 70 Address Page::AllocationWatermark() { |
| 70 PagedSpace* owner = MemoryAllocator::PageOwner(this); | 71 PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this); |
| 71 if (this == owner->AllocationTopPage()) { | 72 if (this == owner->AllocationTopPage()) { |
| 72 return owner->top(); | 73 return owner->top(); |
| 73 } | 74 } |
| 74 return address() + AllocationWatermarkOffset(); | 75 return address() + AllocationWatermarkOffset(); |
| 75 } | 76 } |
| 76 | 77 |
| 77 | 78 |
| 78 uint32_t Page::AllocationWatermarkOffset() { | 79 uint32_t Page::AllocationWatermarkOffset() { |
| 79 return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >> | 80 return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >> |
| 80 kAllocationWatermarkOffsetShift); | 81 kAllocationWatermarkOffsetShift); |
| 81 } | 82 } |
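
The watermark offset shares the `flags_` word with the boolean page flags, and the accessor above recovers it with a mask and shift. A self-contained sketch of that packing scheme, with illustrative constants (V8's real shift and mask are defined in spaces.h, not shown in this diff):

```cpp
#include <cstdint>

// Illustrative mask-and-shift packing: low bits hold boolean flags,
// the remaining bits hold a small integer offset. The shift value 3
// and the derived mask are stand-ins, not V8's actual constants.
const int kOffsetShift = 3;                       // flag bits live below this
const uint32_t kOffsetMask = ~0u << kOffsetShift;

uint32_t PackOffset(uint32_t flags, uint32_t offset) {
  return (flags & ~kOffsetMask) | (offset << kOffsetShift);
}

uint32_t UnpackOffset(uint32_t flags) {
  return (flags & kOffsetMask) >> kOffsetShift;
}
```
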
| 82 | 83 |
| 83 | 84 |
| 84 void Page::SetAllocationWatermark(Address allocation_watermark) { | 85 void Page::SetAllocationWatermark(Address allocation_watermark) { |
| 85 if ((Heap::gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) { | 86 if ((heap_->gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) { |
| 86 // While iterating over intergenerational references during scavenge | 87 // While iterating over intergenerational references during scavenge |
| 87 // we might decide to promote a young object we encounter. | 88 // we might decide to promote a young object we encounter. |
| 88 // We will allocate space for such an object and put it | 89 // We will allocate space for such an object and put it |
| 89 // into the promotion queue to process it later. | 90 // into the promotion queue to process it later. |
| 90 // If space for the object was allocated somewhere beyond the allocation | 91 // If space for the object was allocated somewhere beyond the allocation |
| 91 // watermark, this might cause garbage pointers to appear below the | 92 // watermark, this might cause garbage pointers to appear below the |
| 92 // allocation watermark. To avoid visiting them during the dirty-region | 93 // allocation watermark. To avoid visiting them during the dirty-region |
| 93 // iteration, which might still be in progress, we store a valid allocation | 94 // iteration, which might still be in progress, we store a valid allocation |
| 94 // watermark value and mark this page as having an invalid watermark. | 95 // watermark value and mark this page as having an invalid watermark. |
| 95 SetCachedAllocationWatermark(AllocationWatermark()); | 96 SetCachedAllocationWatermark(AllocationWatermark()); |
| (...skipping 116 matching lines...) |
| 212 while (++rstart < rend) { | 213 while (++rstart < rend) { |
| 213 bitmask |= 1 << rstart; | 214 bitmask |= 1 << rstart; |
| 214 } | 215 } |
| 215 | 216 |
| 216 if (bitmask) { | 217 if (bitmask) { |
| 217 SetRegionMarks(GetRegionMarks() & ~bitmask); | 218 SetRegionMarks(GetRegionMarks() & ~bitmask); |
| 218 } | 219 } |
| 219 } | 220 } |
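
The loop in this hunk sets every region bit strictly between `rstart` and `rend` before those bits are cleared from the mark word. The same mask has a closed form, shown below as a standalone sketch (assuming fewer than 32 regions, so the shifts stay in range):

```cpp
#include <cstdint>

// Bits rstart+1 .. rend-1 inclusive, matching the
// `while (++rstart < rend) bitmask |= 1 << rstart;` loop above.
// Requires 0 <= rstart < rend < 32 to keep the shifts defined.
uint32_t RangeMask(int rstart, int rend) {
  uint32_t below_rend = (1u << rend) - 1;              // bits 0 .. rend-1
  uint32_t through_rstart = (1u << (rstart + 1)) - 1;  // bits 0 .. rstart
  return below_rend & ~through_rstart;                 // strictly between
}
```
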
| 220 | 221 |
| 221 | 222 |
| 222 void Page::FlipMeaningOfInvalidatedWatermarkFlag() { | 223 void Page::FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap) { |
| 223 watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED; | 224 heap->page_watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED; |
| 224 } | 225 } |
| 225 | 226 |
| 226 | 227 |
| 227 bool Page::IsWatermarkValid() { | 228 bool Page::IsWatermarkValid() { |
| 228 return (flags_ & (1 << WATERMARK_INVALIDATED)) != watermark_invalidated_mark_; | 229 return (flags_ & (1 << WATERMARK_INVALIDATED)) != |
| 230 heap_->page_watermark_invalidated_mark_; |
| 229 } | 231 } |
| 230 | 232 |
| 231 | 233 |
| 232 void Page::InvalidateWatermark(bool value) { | 234 void Page::InvalidateWatermark(bool value) { |
| 233 if (value) { | 235 if (value) { |
| 234 flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) | | 236 flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) | |
| 235 watermark_invalidated_mark_; | 237 heap_->page_watermark_invalidated_mark_; |
| 236 } else { | 238 } else { |
| 237 flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) | | 239 flags_ = |
| 238 (watermark_invalidated_mark_ ^ (1 << WATERMARK_INVALIDATED)); | 240 (flags_ & ~(1 << WATERMARK_INVALIDATED)) | |
| 241 (heap_->page_watermark_invalidated_mark_ ^ |
| 242 (1 << WATERMARK_INVALIDATED)); |
| 239 } | 243 } |
| 240 | 244 |
| 241 ASSERT(IsWatermarkValid() == !value); | 245 ASSERT(IsWatermarkValid() == !value); |
| 242 } | 246 } |
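
The three functions above implement a constant-time "invalidate every watermark" operation, and this change moves its state from a static member onto the heap for multi-isolate support. Rather than touching each page, the collector flips one reference bit; a page's WATERMARK_INVALIDATED bit is then interpreted relative to it. A standalone sketch of the trick, under assumed names (`MiniPage` and `global_mark` stand in for `Page::flags_` and `heap_->page_watermark_invalidated_mark_`):

```cpp
#include <cstdint>

// A page's stamp is "valid" iff it differs from the global mark, so
// flipping the mark invalidates all pages in O(1), with no page visits.
struct MiniPage { uint32_t stamp; };  // only bit 0 is used here

static uint32_t global_mark = 0;  // always 0 or 1

bool IsValid(const MiniPage* p) { return (p->stamp & 1) != global_mark; }
void SetValid(MiniPage* p)      { p->stamp = global_mark ^ 1; }
void SetInvalid(MiniPage* p)    { p->stamp = global_mark; }
void InvalidateAllPages()       { global_mark ^= 1; }  // flip the meaning
```
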
| 243 | 247 |
| 244 | 248 |
| 245 bool Page::GetPageFlag(PageFlag flag) { | 249 bool Page::GetPageFlag(PageFlag flag) { |
| 246 return (flags_ & static_cast<intptr_t>(1 << flag)) != 0; | 250 return (flags_ & static_cast<intptr_t>(1 << flag)) != 0; |
| 247 } | 251 } |
| 248 | 252 |
| 249 | 253 |
| 250 void Page::SetPageFlag(PageFlag flag, bool value) { | 254 void Page::SetPageFlag(PageFlag flag, bool value) { |
| 251 if (value) { | 255 if (value) { |
| 252 flags_ |= static_cast<intptr_t>(1 << flag); | 256 flags_ |= static_cast<intptr_t>(1 << flag); |
| 253 } else { | 257 } else { |
| 254 flags_ &= ~static_cast<intptr_t>(1 << flag); | 258 flags_ &= ~static_cast<intptr_t>(1 << flag); |
| 255 } | 259 } |
| 256 } | 260 } |
| 257 | 261 |
| 258 | 262 |
| 259 void Page::ClearPageFlags() { | 263 void Page::ClearPageFlags() { |
| 260 flags_ = 0; | 264 flags_ = 0; |
| 261 } | 265 } |
| 262 | 266 |
| 263 | 267 |
| 264 void Page::ClearGCFields() { | 268 void Page::ClearGCFields() { |
| 265 InvalidateWatermark(true); | 269 InvalidateWatermark(true); |
| 266 SetAllocationWatermark(ObjectAreaStart()); | 270 SetAllocationWatermark(ObjectAreaStart()); |
| 267 if (Heap::gc_state() == Heap::SCAVENGE) { | 271 if (heap_->gc_state() == Heap::SCAVENGE) { |
| 268 SetCachedAllocationWatermark(ObjectAreaStart()); | 272 SetCachedAllocationWatermark(ObjectAreaStart()); |
| 269 } | 273 } |
| 270 SetRegionMarks(kAllRegionsCleanMarks); | 274 SetRegionMarks(kAllRegionsCleanMarks); |
| 271 } | 275 } |
| 272 | 276 |
| 273 | 277 |
| 274 bool Page::WasInUseBeforeMC() { | 278 bool Page::WasInUseBeforeMC() { |
| 275 return GetPageFlag(WAS_IN_USE_BEFORE_MC); | 279 return GetPageFlag(WAS_IN_USE_BEFORE_MC); |
| 276 } | 280 } |
| 277 | 281 |
| (...skipping 23 matching lines...) |
| 301 | 305 |
| 302 | 306 |
| 303 // ----------------------------------------------------------------------------- | 307 // ----------------------------------------------------------------------------- |
| 304 // MemoryAllocator | 308 // MemoryAllocator |
| 305 | 309 |
| 306 void MemoryAllocator::ChunkInfo::init(Address a, size_t s, PagedSpace* o) { | 310 void MemoryAllocator::ChunkInfo::init(Address a, size_t s, PagedSpace* o) { |
| 307 address_ = a; | 311 address_ = a; |
| 308 size_ = s; | 312 size_ = s; |
| 309 owner_ = o; | 313 owner_ = o; |
| 310 executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable(); | 314 executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable(); |
| 315 owner_identity_ = (o == NULL) ? FIRST_SPACE : o->identity(); |
| 311 } | 316 } |
| 312 | 317 |
| 313 | 318 |
| 314 bool MemoryAllocator::IsValidChunk(int chunk_id) { | 319 bool MemoryAllocator::IsValidChunk(int chunk_id) { |
| 315 if (!IsValidChunkId(chunk_id)) return false; | 320 if (!IsValidChunkId(chunk_id)) return false; |
| 316 | 321 |
| 317 ChunkInfo& c = chunks_[chunk_id]; | 322 ChunkInfo& c = chunks_[chunk_id]; |
| 318 return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL); | 323 return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL); |
| 319 } | 324 } |
| 320 | 325 |
| (...skipping 80 matching lines...) |
| 401 | 406 |
| 402 #endif | 407 #endif |
| 403 | 408 |
| 404 | 409 |
| 405 // -------------------------------------------------------------------------- | 410 // -------------------------------------------------------------------------- |
| 406 // PagedSpace | 411 // PagedSpace |
| 407 | 412 |
| 408 bool PagedSpace::Contains(Address addr) { | 413 bool PagedSpace::Contains(Address addr) { |
| 409 Page* p = Page::FromAddress(addr); | 414 Page* p = Page::FromAddress(addr); |
| 410 if (!p->is_valid()) return false; | 415 if (!p->is_valid()) return false; |
| 411 return MemoryAllocator::IsPageInSpace(p, this); | 416 return heap()->isolate()->memory_allocator()->IsPageInSpace(p, this); |
| 412 } | |
| 413 | |
| 414 | |
| 415 bool PagedSpace::SafeContains(Address addr) { | |
| 416 if (!MemoryAllocator::SafeIsInAPageChunk(addr)) return false; | |
| 417 Page* p = Page::FromAddress(addr); | |
| 418 if (!p->is_valid()) return false; | |
| 419 return MemoryAllocator::IsPageInSpace(p, this); | |
| 420 } | 417 } |
| 421 | 418 |
| 422 | 419 |
| 423 // Try linear allocation in the page of alloc_info's allocation top. Does | 420 // Try linear allocation in the page of alloc_info's allocation top. Does |
| 424 // not contain slow-case logic (e.g., moving to the next page or trying | 421 // not contain slow-case logic (e.g., moving to the next page or trying |
| 425 // free-list allocation), so it can be used by all the allocation functions | 422 // free-list allocation), so it can be used by all the allocation functions |
| 426 // and for all the paged spaces. | 423 // and for all the paged spaces. |
| 427 HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info, | 424 HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info, |
| 428 int size_in_bytes) { | 425 int size_in_bytes) { |
| 429 Address current_top = alloc_info->top; | 426 Address current_top = alloc_info->top; |
| (...skipping 40 matching lines...) |
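
The comment above describes the classic bump-pointer fast path: carve the object out at the current top, advance the top, and leave every slow case to the caller. A self-contained sketch of that shape (the `Block` struct is a stand-in for V8's `AllocationInfo` top/limit pair, not an actual V8 type):

```cpp
#include <cstddef>

// Generic bump-pointer fast path. Returns NULL when the request does
// not fit, signalling the caller to take its slow path (next page,
// free list, or GC), exactly the cases AllocateLinearly leaves out.
struct Block { char* top; char* limit; };

void* BumpAllocate(Block* b, size_t size_in_bytes) {
  char* current_top = b->top;
  char* new_top = current_top + size_in_bytes;
  if (new_top > b->limit) return NULL;  // caller handles the slow case
  b->top = new_top;                     // bump the allocation pointer
  return current_top;
}
```
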
| 470 | 467 |
| 471 Address LargeObjectChunk::GetStartAddress() { | 468 Address LargeObjectChunk::GetStartAddress() { |
| 472 // Round the chunk address up to the nearest page-aligned address | 469 // Round the chunk address up to the nearest page-aligned address |
| 473 // and return the heap object in that page. | 470 // and return the heap object in that page. |
| 474 Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize)); | 471 Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize)); |
| 475 return page->ObjectAreaStart(); | 472 return page->ObjectAreaStart(); |
| 476 } | 473 } |
| 477 | 474 |
| 478 | 475 |
| 479 void LargeObjectChunk::Free(Executability executable) { | 476 void LargeObjectChunk::Free(Executability executable) { |
| 480 MemoryAllocator::FreeRawMemory(address(), size(), executable); | 477 Isolate* isolate = |
| 478 Page::FromAddress(RoundUp(address(), Page::kPageSize))->heap_->isolate(); |
| 479 isolate->memory_allocator()->FreeRawMemory(address(), size(), executable); |
| 481 } | 480 } |
| 482 | 481 |
| 483 // ----------------------------------------------------------------------------- | 482 // ----------------------------------------------------------------------------- |
| 484 // NewSpace | 483 // NewSpace |
| 485 | 484 |
| 486 MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes, | 485 MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes, |
| 487 AllocationInfo* alloc_info) { | 486 AllocationInfo* alloc_info) { |
| 488 Address new_top = alloc_info->top + size_in_bytes; | 487 Address new_top = alloc_info->top + size_in_bytes; |
| 489 if (new_top > alloc_info->limit) return Failure::RetryAfterGC(); | 488 if (new_top > alloc_info->limit) return Failure::RetryAfterGC(); |
| 490 | 489 |
| 491 Object* obj = HeapObject::FromAddress(alloc_info->top); | 490 Object* obj = HeapObject::FromAddress(alloc_info->top); |
| 492 alloc_info->top = new_top; | 491 alloc_info->top = new_top; |
| 493 #ifdef DEBUG | 492 #ifdef DEBUG |
| 494 SemiSpace* space = | 493 SemiSpace* space = |
| 495 (alloc_info == &allocation_info_) ? &to_space_ : &from_space_; | 494 (alloc_info == &allocation_info_) ? &to_space_ : &from_space_; |
| 496 ASSERT(space->low() <= alloc_info->top | 495 ASSERT(space->low() <= alloc_info->top |
| 497 && alloc_info->top <= space->high() | 496 && alloc_info->top <= space->high() |
| 498 && alloc_info->limit == space->high()); | 497 && alloc_info->limit == space->high()); |
| 499 #endif | 498 #endif |
| 500 return obj; | 499 return obj; |
| 501 } | 500 } |
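
`AllocateRawInternal` reports a full semispace as `Failure::RetryAfterGC()` rather than failing hard; the expected calling convention is to collect and retry. A hedged sketch of such a caller (the `CollectGarbage` and `FatalProcessOutOfMemory` hooks and the retry count are illustrative assumptions, not code from this file):

```cpp
// Illustrative retry loop around the fallible allocator above.
Object* AllocateWithRetry(NewSpace* space, int size, AllocationInfo* info) {
  for (int attempt = 0; attempt < 2; attempt++) {
    MaybeObject* result = space->AllocateRawInternal(size, info);
    Object* obj;
    if (result->ToObject(&obj)) return obj;  // allocation succeeded
    CollectGarbage();  // assumed hook: evacuate new space, then retry
  }
  FatalProcessOutOfMemory("NewSpace allocation failed");  // assumed hook
  return NULL;  // unreachable
}
```
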
| 502 | 501 |
| 503 | 502 |
| 503 intptr_t LargeObjectSpace::Available() { |
| 504 return LargeObjectChunk::ObjectSizeFor( |
| 505 heap()->isolate()->memory_allocator()->Available()); |
| 506 } |
| 507 |
| 508 |
| 504 template <typename StringType> | 509 template <typename StringType> |
| 505 void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) { | 510 void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) { |
| 506 ASSERT(length <= string->length()); | 511 ASSERT(length <= string->length()); |
| 507 ASSERT(string->IsSeqString()); | 512 ASSERT(string->IsSeqString()); |
| 508 ASSERT(string->address() + StringType::SizeFor(string->length()) == | 513 ASSERT(string->address() + StringType::SizeFor(string->length()) == |
| 509 allocation_info_.top); | 514 allocation_info_.top); |
| 510 allocation_info_.top = | 515 allocation_info_.top = |
| 511 string->address() + StringType::SizeFor(length); | 516 string->address() + StringType::SizeFor(length); |
| 512 string->set_length(length); | 517 string->set_length(length); |
| 513 } | 518 } |
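
The shrink works only because the string is the most recently allocated object; the ASSERT checks that its end coincides with `allocation_info_.top`, so moving the top backwards returns the unused tail to the allocator without leaving a hole. A hypothetical caller, for illustration only (the allocation and fill helpers below are assumptions):

```cpp
// Hypothetical over-allocate-then-shrink pattern: reserve worst-case
// space, fill it, then give back the unused tail at the boundary.
SeqAsciiString* str = AllocateMaxSizeString(max_length);  // assumed helper
int actual_length = FillString(str);                      // assumed helper
new_space->ShrinkStringAtAllocationBoundary<SeqAsciiString>(
    str, actual_length);
```
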
| 514 | 519 |
| 515 | 520 |
| 516 bool FreeListNode::IsFreeListNode(HeapObject* object) { | 521 bool FreeListNode::IsFreeListNode(HeapObject* object) { |
| 517 return object->map() == Heap::raw_unchecked_byte_array_map() | 522 return object->map() == HEAP->raw_unchecked_byte_array_map() |
| 518 || object->map() == Heap::raw_unchecked_one_pointer_filler_map() | 523 || object->map() == HEAP->raw_unchecked_one_pointer_filler_map() |
| 519 || object->map() == Heap::raw_unchecked_two_pointer_filler_map(); | 524 || object->map() == HEAP->raw_unchecked_two_pointer_filler_map(); |
| 520 } | 525 } |
| 521 | 526 |
| 522 } } // namespace v8::internal | 527 } } // namespace v8::internal |
| 523 | 528 |
| 524 #endif // V8_SPACES_INL_H_ | 529 #endif // V8_SPACES_INL_H_ |