| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_HEAP_SPACES_INL_H_ | 5 #ifndef V8_HEAP_SPACES_INL_H_ |
| 6 #define V8_HEAP_SPACES_INL_H_ | 6 #define V8_HEAP_SPACES_INL_H_ |
| 7 | 7 |
| 8 #include "src/heap/incremental-marking.h" | 8 #include "src/heap/incremental-marking.h" |
| 9 #include "src/heap/spaces.h" | 9 #include "src/heap/spaces.h" |
| 10 #include "src/isolate.h" | 10 #include "src/isolate.h" |
| (...skipping 239 matching lines...) |
| 250 // -------------------------------------------------------------------------- | 250 // -------------------------------------------------------------------------- |
| 251 // PagedSpace | 251 // PagedSpace |
| 252 | 252 |
| 253 Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable, | 253 Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable, |
| 254 PagedSpace* owner) { | 254 PagedSpace* owner) { |
| 255 Page* page = reinterpret_cast<Page*>(chunk); | 255 Page* page = reinterpret_cast<Page*>(chunk); |
| 256 page->mutex_ = new base::Mutex(); | 256 page->mutex_ = new base::Mutex(); |
| 257 DCHECK(page->area_size() <= kAllocatableMemory); | 257 DCHECK(page->area_size() <= kAllocatableMemory); |
| 258 DCHECK(chunk->owner() == owner); | 258 DCHECK(chunk->owner() == owner); |
| 259 owner->IncreaseCapacity(page->area_size()); | 259 owner->IncreaseCapacity(page->area_size()); |
| 260 heap->incremental_marking()->SetOldSpacePageFlags(chunk); |
| 261 |
| 262 // Make sure that categories are initialized before freeing the area. |
| 263 page->InitializeFreeListCategories(); |
| 260 owner->Free(page->area_start(), page->area_size()); | 264 owner->Free(page->area_start(), page->area_size()); |
| 261 | 265 |
| 262 heap->incremental_marking()->SetOldSpacePageFlags(chunk); | 266 return page; |
| 267 } |
| 263 | 268 |
| 264 return page; | 269 void Page::InitializeFreeListCategories() { |
| 270 for (int i = kFirstCategory; i < kNumberOfCategories; i++) { |
| 271 categories_[i].Initialize(static_cast<FreeListCategoryType>(i)); |
| 272 } |
| 265 } | 273 } |
| 266 | 274 |
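Note on the reordering above: owner->Free(page->area_start(), page->area_size()) hands the page's whole area back to the owner's free list, and with per-page free list categories that call ends up filing memory into one of this page's own categories_ slots, so both the marking flags and InitializeFreeListCategories() have to run first. A minimal standalone sketch of that invariant follows; DemoPage/DemoCategory and the constants are hypothetical stand-ins, not V8's actual types.

#include <cassert>
#include <cstddef>

struct DemoCategory {
  int type = -1;
  void* top = nullptr;        // head of the free node list (one slot here)
  std::size_t available = 0;

  void Initialize(int t) {
    type = t;
    top = nullptr;
    available = 0;
  }
  void Add(void* node, std::size_t bytes) {
    assert(type >= 0);        // would fire if Free() reached the category first
    top = node;               // the real code links a node list; one slot suffices
    available += bytes;
  }
};

struct DemoPage {
  static const int kNumberOfCategories = 6;
  DemoCategory categories_[kNumberOfCategories];

  // Mirrors the ordering in Page::Initialize(): categories first, then the
  // page area can be handed to the owner's free list, which files it into
  // one of the categories above.
  void InitializeFreeListCategories() {
    for (int i = 0; i < kNumberOfCategories; i++) categories_[i].Initialize(i);
  }
};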
| 267 void MemoryChunk::IncrementLiveBytesFromGC(HeapObject* object, int by) { | 275 void MemoryChunk::IncrementLiveBytesFromGC(HeapObject* object, int by) { |
| 268 MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by); | 276 MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by); |
| 269 } | 277 } |
| 270 | 278 |
| 271 void MemoryChunk::ResetLiveBytes() { | 279 void MemoryChunk::ResetLiveBytes() { |
| 272 if (FLAG_trace_live_bytes) { | 280 if (FLAG_trace_live_bytes) { |
| 273 PrintIsolate(heap()->isolate(), "live-bytes: reset page=%p %d->0\n", this, | 281 PrintIsolate(heap()->isolate(), "live-bytes: reset page=%p %d->0\n", this, |
| 274 live_byte_count_); | 282 live_byte_count_); |
| (...skipping 39 matching lines...) |
| 314 if (offset < MemoryChunk::kHeaderSize || !chunk->HasPageHeader()) { | 322 if (offset < MemoryChunk::kHeaderSize || !chunk->HasPageHeader()) { |
| 315 chunk = heap->lo_space()->FindPage(addr); | 323 chunk = heap->lo_space()->FindPage(addr); |
| 316 } | 324 } |
| 317 return chunk; | 325 return chunk; |
| 318 } | 326 } |
| 319 | 327 |
| 320 Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) { | 328 Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) { |
| 321 return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr)); | 329 return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr)); |
| 322 } | 330 } |
| 323 | 331 |
| 332 void Page::MarkNeverAllocateForTesting() { |
| 333 DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE)); |
| 334 SetFlag(NEVER_ALLOCATE_ON_PAGE); |
| 335 reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this); |
| 336 } |
| 337 |
| 338 void Page::MarkEvacuationCandidate() { |
| 339 DCHECK(!IsFlagSet(NEVER_EVACUATE)); |
| 340 DCHECK_NULL(old_to_old_slots_); |
| 341 DCHECK_NULL(typed_old_to_old_slots_); |
| 342 SetFlag(EVACUATION_CANDIDATE); |
| 343 reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this); |
| 344 } |
| 345 |
| 346 void Page::ClearEvacuationCandidate() { |
| 347 DCHECK_NULL(old_to_old_slots_); |
| 348 DCHECK_NULL(typed_old_to_old_slots_); |
| 349 ClearFlag(EVACUATION_CANDIDATE); |
| 350 InitializeFreeListCategories(); |
| 351 } |
| 352 |
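The new Mark*/Clear* helpers above all follow the same shape: flip a page flag, then tear down (or rebuild) the page's free list state so the allocator stops handing out memory from that page. A rough sketch of the SetFlag/ClearFlag/IsFlagSet pattern as a plain bitmask, with invented names that do not reproduce the real MemoryChunk flag enum:

#include <cstdint>

struct DemoChunk {
  enum Flag : std::uint32_t {
    NEVER_EVACUATE = 1u << 0,
    NEVER_ALLOCATE_ON_PAGE = 1u << 1,
    EVACUATION_CANDIDATE = 1u << 2,
  };
  std::uint32_t flags_ = 0;

  void SetFlag(Flag f) { flags_ |= f; }
  void ClearFlag(Flag f) { flags_ &= ~static_cast<std::uint32_t>(f); }
  bool IsFlagSet(Flag f) const { return (flags_ & f) != 0; }
};

// Pairing used by MarkEvacuationCandidate()/MarkNeverAllocateForTesting():
// set the flag, then drop this page's free list entries (EvictFreeListItems).
inline void MarkCandidate(DemoChunk* chunk) {
  chunk->SetFlag(DemoChunk::EVACUATION_CANDIDATE);
  // ...evict the page's free list items here.
}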
| 324 MemoryChunkIterator::MemoryChunkIterator(Heap* heap, Mode mode) | 353 MemoryChunkIterator::MemoryChunkIterator(Heap* heap, Mode mode) |
| 325 : state_(kOldSpaceState), | 354 : state_(kOldSpaceState), |
| 326 mode_(mode), | 355 mode_(mode), |
| 327 old_iterator_(heap->old_space()), | 356 old_iterator_(heap->old_space()), |
| 328 code_iterator_(heap->code_space()), | 357 code_iterator_(heap->code_space()), |
| 329 map_iterator_(heap->map_space()), | 358 map_iterator_(heap->map_space()), |
| 330 lo_iterator_(heap->lo_space()) {} | 359 lo_iterator_(heap->lo_space()) {} |
| 331 | 360 |
| 332 MemoryChunk* MemoryChunkIterator::next() { | 361 MemoryChunk* MemoryChunkIterator::next() { |
| 333 switch (state_) { | 362 switch (state_) { |
| (...skipping 28 matching lines...) |
| 362 } | 391 } |
| 363 case kFinishedState: | 392 case kFinishedState: |
| 364 return nullptr; | 393 return nullptr; |
| 365 default: | 394 default: |
| 366 break; | 395 break; |
| 367 } | 396 } |
| 368 UNREACHABLE(); | 397 UNREACHABLE(); |
| 369 return nullptr; | 398 return nullptr; |
| 370 } | 399 } |
| 371 | 400 |
| 372 | |
| 373 void Page::set_next_page(Page* page) { | 401 void Page::set_next_page(Page* page) { |
| 374 DCHECK(page->owner() == owner()); | 402 DCHECK(page->owner() == owner()); |
| 375 set_next_chunk(page); | 403 set_next_chunk(page); |
| 376 } | 404 } |
| 377 | 405 |
| 378 | |
| 379 void Page::set_prev_page(Page* page) { | 406 void Page::set_prev_page(Page* page) { |
| 380 DCHECK(page->owner() == owner()); | 407 DCHECK(page->owner() == owner()); |
| 381 set_prev_chunk(page); | 408 set_prev_chunk(page); |
| 382 } | 409 } |
| 383 | 410 |
| 411 Page* FreeListCategory::page() { |
| 412 return Page::FromAddress(reinterpret_cast<Address>(this)); |
| 413 } |
| 414 |
| 415 FreeList* FreeListCategory::owner() { |
| 416 return reinterpret_cast<PagedSpace*>( |
| 417 Page::FromAddress(reinterpret_cast<Address>(this))->owner()) |
| 418 ->free_list(); |
| 419 } |
| 420 |
| 421 bool FreeListCategory::is_linked() { |
| 422 return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this; |
| 423 } |
| 384 | 424 |
| 385 // Try linear allocation in the page of alloc_info's allocation top. Does | 425 // Try linear allocation in the page of alloc_info's allocation top. Does |
| 386 // not contain slow case logic (e.g. move to the next page or try free list | 426 // not contain slow case logic (e.g. move to the next page or try free list |
| 387 // allocation) so it can be used by all the allocation functions and for all | 427 // allocation) so it can be used by all the allocation functions and for all |
| 388 // the paged spaces. | 428 // the paged spaces. |
| 389 HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) { | 429 HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) { |
| 390 Address current_top = allocation_info_.top(); | 430 Address current_top = allocation_info_.top(); |
| 391 Address new_top = current_top + size_in_bytes; | 431 Address new_top = current_top + size_in_bytes; |
| 392 if (new_top > allocation_info_.limit()) return NULL; | 432 if (new_top > allocation_info_.limit()) return NULL; |
| 393 | 433 |
| (...skipping 231 matching lines...) |
| 625 other->allocation_info_.Reset(nullptr, nullptr); | 665 other->allocation_info_.Reset(nullptr, nullptr); |
| 626 return true; | 666 return true; |
| 627 } | 667 } |
| 628 return false; | 668 return false; |
| 629 } | 669 } |
| 630 | 670 |
| 631 } // namespace internal | 671 } // namespace internal |
| 632 } // namespace v8 | 672 } // namespace v8 |
| 633 | 673 |
| 634 #endif // V8_HEAP_SPACES_INL_H_ | 674 #endif // V8_HEAP_SPACES_INL_H_ |