| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 57 matching lines...) |
| 68 | 68 |
| 69 HeapObjectIterator::HeapObjectIterator(Page* page, | 69 HeapObjectIterator::HeapObjectIterator(Page* page, |
| 70 HeapObjectCallback size_func) { | 70 HeapObjectCallback size_func) { |
| 71 Space* owner = page->owner(); | 71 Space* owner = page->owner(); |
| 72 ASSERT(owner == HEAP->old_pointer_space() || | 72 ASSERT(owner == HEAP->old_pointer_space() || |
| 73 owner == HEAP->old_data_space() || | 73 owner == HEAP->old_data_space() || |
| 74 owner == HEAP->map_space() || | 74 owner == HEAP->map_space() || |
| 75 owner == HEAP->cell_space() || | 75 owner == HEAP->cell_space() || |
| 76 owner == HEAP->code_space()); | 76 owner == HEAP->code_space()); |
| 77 Initialize(reinterpret_cast<PagedSpace*>(owner), | 77 Initialize(reinterpret_cast<PagedSpace*>(owner), |
| 78 page->ObjectAreaStart(), | 78 page->area_start(), |
| 79 page->ObjectAreaEnd(), | 79 page->area_end(), |
| 80 kOnePageOnly, | 80 kOnePageOnly, |
| 81 size_func); | 81 size_func); |
| 82 ASSERT(page->WasSweptPrecisely()); | 82 ASSERT(page->WasSweptPrecisely()); |
| 83 } | 83 } |
| 84 | 84 |
| 85 | 85 |
| 86 void HeapObjectIterator::Initialize(PagedSpace* space, | 86 void HeapObjectIterator::Initialize(PagedSpace* space, |
| 87 Address cur, Address end, | 87 Address cur, Address end, |
| 88 HeapObjectIterator::PageMode mode, | 88 HeapObjectIterator::PageMode mode, |
| 89 HeapObjectCallback size_f) { | 89 HeapObjectCallback size_f) { |
| (...skipping 11 matching lines...) |
| 101 // We have hit the end of the page and should advance to the next block of | 101 // We have hit the end of the page and should advance to the next block of |
| 102 // objects. This happens at the end of the page. | 102 // objects. This happens at the end of the page. |
| 103 bool HeapObjectIterator::AdvanceToNextPage() { | 103 bool HeapObjectIterator::AdvanceToNextPage() { |
| 104 ASSERT(cur_addr_ == cur_end_); | 104 ASSERT(cur_addr_ == cur_end_); |
| 105 if (page_mode_ == kOnePageOnly) return false; | 105 if (page_mode_ == kOnePageOnly) return false; |
| 106 Page* cur_page; | 106 Page* cur_page; |
| 107 if (cur_addr_ == NULL) { | 107 if (cur_addr_ == NULL) { |
| 108 cur_page = space_->anchor(); | 108 cur_page = space_->anchor(); |
| 109 } else { | 109 } else { |
| 110 cur_page = Page::FromAddress(cur_addr_ - 1); | 110 cur_page = Page::FromAddress(cur_addr_ - 1); |
| 111 ASSERT(cur_addr_ == cur_page->ObjectAreaEnd()); | 111 ASSERT(cur_addr_ == cur_page->area_end()); |
| 112 } | 112 } |
| 113 cur_page = cur_page->next_page(); | 113 cur_page = cur_page->next_page(); |
| 114 if (cur_page == space_->anchor()) return false; | 114 if (cur_page == space_->anchor()) return false; |
| 115 cur_addr_ = cur_page->ObjectAreaStart(); | 115 cur_addr_ = cur_page->area_start(); |
| 116 cur_end_ = cur_page->ObjectAreaEnd(); | 116 cur_end_ = cur_page->area_end(); |
| 117 ASSERT(cur_page->WasSweptPrecisely()); | 117 ASSERT(cur_page->WasSweptPrecisely()); |
| 118 return true; | 118 return true; |
| 119 } | 119 } |
| 120 | 120 |
| 121 | 121 |
| 122 // ----------------------------------------------------------------------------- | 122 // ----------------------------------------------------------------------------- |
| 123 // CodeRange | 123 // CodeRange |
| 124 | 124 |
| 125 | 125 |
| 126 CodeRange::CodeRange(Isolate* isolate) | 126 CodeRange::CodeRange(Isolate* isolate) |
| (...skipping 93 matching lines...) |
| 220 size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment); | 220 size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment); |
| 221 FreeBlock current = allocation_list_[current_allocation_block_index_]; | 221 FreeBlock current = allocation_list_[current_allocation_block_index_]; |
| 222 if (aligned_requested >= (current.size - Page::kPageSize)) { | 222 if (aligned_requested >= (current.size - Page::kPageSize)) { |
| 223 // Don't leave a small free block, useless for a large object or chunk. | 223 // Don't leave a small free block, useless for a large object or chunk. |
| 224 *allocated = current.size; | 224 *allocated = current.size; |
| 225 } else { | 225 } else { |
| 226 *allocated = aligned_requested; | 226 *allocated = aligned_requested; |
| 227 } | 227 } |
| 228 ASSERT(*allocated <= current.size); | 228 ASSERT(*allocated <= current.size); |
| 229 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment)); | 229 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment)); |
| 230 if (!code_range_->Commit(current.start, *allocated, true)) { | 230 if (!MemoryAllocator::CommitCodePage(code_range_, |
| 231 current.start, |
| 232 *allocated)) { |
| 231 *allocated = 0; | 233 *allocated = 0; |
| 232 return NULL; | 234 return NULL; |
| 233 } | 235 } |
| 234 allocation_list_[current_allocation_block_index_].start += *allocated; | 236 allocation_list_[current_allocation_block_index_].start += *allocated; |
| 235 allocation_list_[current_allocation_block_index_].size -= *allocated; | 237 allocation_list_[current_allocation_block_index_].size -= *allocated; |
| 236 if (*allocated == current.size) { | 238 if (*allocated == current.size) { |
| 237 GetNextAllocationBlock(0); // This block is used up, get the next one. | 239 GetNextAllocationBlock(0); // This block is used up, get the next one. |
| 238 } | 240 } |
| 239 return current.start; | 241 return current.start; |
| 240 } | 242 } |
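As an aside on the block-sizing rule just above (the rule itself is unchanged by this patch): the request is rounded up to the chunk alignment, and if the remainder of the current free block would be smaller than one page, the whole block is handed out. A minimal sketch of that logic, with RoundUp spelled out and all names hypothetical stand-ins for the real V8 helpers:

```cpp
// Illustrative sketch only (not part of this patch): how AllocateRawMemory
// sizes a request against the current free block.
#include <cstddef>

size_t SizeCodeRangeRequest(size_t requested, size_t block_size,
                            size_t alignment, size_t page_size) {
  // Round the request up to the chunk alignment (MemoryChunk::kAlignment in
  // the real code).
  size_t aligned = ((requested + alignment - 1) / alignment) * alignment;
  // If the remainder of the free block would be smaller than one page, hand
  // out the whole block instead of leaving an unusable sliver behind.
  if (aligned >= block_size - page_size) return block_size;
  return aligned;
}
```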
| (...skipping 110 matching lines...) |
| 351 } | 353 } |
| 352 | 354 |
| 353 | 355 |
| 354 Address MemoryAllocator::AllocateAlignedMemory(size_t size, | 356 Address MemoryAllocator::AllocateAlignedMemory(size_t size, |
| 355 size_t alignment, | 357 size_t alignment, |
| 356 Executability executable, | 358 Executability executable, |
| 357 VirtualMemory* controller) { | 359 VirtualMemory* controller) { |
| 358 VirtualMemory reservation; | 360 VirtualMemory reservation; |
| 359 Address base = ReserveAlignedMemory(size, alignment, &reservation); | 361 Address base = ReserveAlignedMemory(size, alignment, &reservation); |
| 360 if (base == NULL) return NULL; | 362 if (base == NULL) return NULL; |
| 361 if (!reservation.Commit(base, | 363 |
| 362 size, | 364 if (executable == EXECUTABLE) { |
| 363 executable == EXECUTABLE)) { | 365 CommitCodePage(&reservation, base, size); |
| 364 return NULL; | 366 } else { |
| 367 if (!reservation.Commit(base, |
| 368 size, |
| 369 executable == EXECUTABLE)) { |
| 370 return NULL; |
| 371 } |
| 365 } | 372 } |
| 373 |
| 366 controller->TakeControl(&reservation); | 374 controller->TakeControl(&reservation); |
| 367 return base; | 375 return base; |
| 368 } | 376 } |
| 369 | 377 |
| 370 | 378 |
| 371 void Page::InitializeAsAnchor(PagedSpace* owner) { | 379 void Page::InitializeAsAnchor(PagedSpace* owner) { |
| 372 set_owner(owner); | 380 set_owner(owner); |
| 373 set_prev_page(this); | 381 set_prev_page(this); |
| 374 set_next_page(this); | 382 set_next_page(this); |
| 375 } | 383 } |
| 376 | 384 |
| 377 | 385 |
| 378 NewSpacePage* NewSpacePage::Initialize(Heap* heap, | 386 NewSpacePage* NewSpacePage::Initialize(Heap* heap, |
| 379 Address start, | 387 Address start, |
| 380 SemiSpace* semi_space) { | 388 SemiSpace* semi_space) { |
| 389 Address area_start = start + NewSpacePage::kObjectStartOffset; |
| 390 Address area_end = start + Page::kPageSize; |
| 391 |
| 381 MemoryChunk* chunk = MemoryChunk::Initialize(heap, | 392 MemoryChunk* chunk = MemoryChunk::Initialize(heap, |
| 382 start, | 393 start, |
| 383 Page::kPageSize, | 394 Page::kPageSize, |
| 395 area_start, |
| 396 area_end, |
| 384 NOT_EXECUTABLE, | 397 NOT_EXECUTABLE, |
| 385 semi_space); | 398 semi_space); |
| 386 chunk->set_next_chunk(NULL); | 399 chunk->set_next_chunk(NULL); |
| 387 chunk->set_prev_chunk(NULL); | 400 chunk->set_prev_chunk(NULL); |
| 388 chunk->initialize_scan_on_scavenge(true); | 401 chunk->initialize_scan_on_scavenge(true); |
| 389 bool in_to_space = (semi_space->id() != kFromSpace); | 402 bool in_to_space = (semi_space->id() != kFromSpace); |
| 390 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE | 403 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE |
| 391 : MemoryChunk::IN_FROM_SPACE); | 404 : MemoryChunk::IN_FROM_SPACE); |
| 392 ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE | 405 ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE |
| 393 : MemoryChunk::IN_TO_SPACE)); | 406 : MemoryChunk::IN_TO_SPACE)); |
| 394 NewSpacePage* page = static_cast<NewSpacePage*>(chunk); | 407 NewSpacePage* page = static_cast<NewSpacePage*>(chunk); |
| 395 heap->incremental_marking()->SetNewSpacePageFlags(page); | 408 heap->incremental_marking()->SetNewSpacePageFlags(page); |
| 396 return page; | 409 return page; |
| 397 } | 410 } |
| 398 | 411 |
| 399 | 412 |
| 400 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) { | 413 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) { |
| 401 set_owner(semi_space); | 414 set_owner(semi_space); |
| 402 set_next_chunk(this); | 415 set_next_chunk(this); |
| 403 set_prev_chunk(this); | 416 set_prev_chunk(this); |
| 404 // Flags marks this invalid page as not being in new-space. | 417 // Flags marks this invalid page as not being in new-space. |
| 405 // All real new-space pages will be in new-space. | 418 // All real new-space pages will be in new-space. |
| 406 SetFlags(0, ~0); | 419 SetFlags(0, ~0); |
| 407 } | 420 } |
| 408 | 421 |
| 409 | 422 |
| 410 MemoryChunk* MemoryChunk::Initialize(Heap* heap, | 423 MemoryChunk* MemoryChunk::Initialize(Heap* heap, |
| 411 Address base, | 424 Address base, |
| 412 size_t size, | 425 size_t size, |
| 426 Address area_start, |
| 427 Address area_end, |
| 413 Executability executable, | 428 Executability executable, |
| 414 Space* owner) { | 429 Space* owner) { |
| 415 MemoryChunk* chunk = FromAddress(base); | 430 MemoryChunk* chunk = FromAddress(base); |
| 416 | 431 |
| 417 ASSERT(base == chunk->address()); | 432 ASSERT(base == chunk->address()); |
| 418 | 433 |
| 419 chunk->heap_ = heap; | 434 chunk->heap_ = heap; |
| 420 chunk->size_ = size; | 435 chunk->size_ = size; |
| 436 chunk->area_start_ = area_start; |
| 437 chunk->area_end_ = area_end; |
| 421 chunk->flags_ = 0; | 438 chunk->flags_ = 0; |
| 422 chunk->set_owner(owner); | 439 chunk->set_owner(owner); |
| 423 chunk->InitializeReservedMemory(); | 440 chunk->InitializeReservedMemory(); |
| 424 chunk->slots_buffer_ = NULL; | 441 chunk->slots_buffer_ = NULL; |
| 425 chunk->skip_list_ = NULL; | 442 chunk->skip_list_ = NULL; |
| 426 chunk->ResetLiveBytes(); | 443 chunk->ResetLiveBytes(); |
| 427 Bitmap::Clear(chunk); | 444 Bitmap::Clear(chunk); |
| 428 chunk->initialize_scan_on_scavenge(false); | 445 chunk->initialize_scan_on_scavenge(false); |
| 429 chunk->SetFlag(WAS_SWEPT_PRECISELY); | 446 chunk->SetFlag(WAS_SWEPT_PRECISELY); |
| 430 | 447 |
| 431 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); | 448 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); |
| 432 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); | 449 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); |
| 433 | 450 |
| 434 if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE); | 451 if (executable == EXECUTABLE) { |
| 452 chunk->SetFlag(IS_EXECUTABLE); |
| 453 } |
| 435 | 454 |
| 436 if (owner == heap->old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA); | 455 if (owner == heap->old_data_space()) { |
| 456 chunk->SetFlag(CONTAINS_ONLY_DATA); |
| 457 } |
| 437 | 458 |
| 438 return chunk; | 459 return chunk; |
| 439 } | 460 } |
| 440 | 461 |
| 441 | 462 |
| 442 void MemoryChunk::InsertAfter(MemoryChunk* other) { | 463 void MemoryChunk::InsertAfter(MemoryChunk* other) { |
| 443 next_chunk_ = other->next_chunk_; | 464 next_chunk_ = other->next_chunk_; |
| 444 prev_chunk_ = other; | 465 prev_chunk_ = other; |
| 445 other->next_chunk_->prev_chunk_ = this; | 466 other->next_chunk_->prev_chunk_ = this; |
| 446 other->next_chunk_ = this; | 467 other->next_chunk_ = this; |
| 447 } | 468 } |
| 448 | 469 |
| 449 | 470 |
| 450 void MemoryChunk::Unlink() { | 471 void MemoryChunk::Unlink() { |
| 451 if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) { | 472 if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) { |
| 452 heap_->decrement_scan_on_scavenge_pages(); | 473 heap_->decrement_scan_on_scavenge_pages(); |
| 453 ClearFlag(SCAN_ON_SCAVENGE); | 474 ClearFlag(SCAN_ON_SCAVENGE); |
| 454 } | 475 } |
| 455 next_chunk_->prev_chunk_ = prev_chunk_; | 476 next_chunk_->prev_chunk_ = prev_chunk_; |
| 456 prev_chunk_->next_chunk_ = next_chunk_; | 477 prev_chunk_->next_chunk_ = next_chunk_; |
| 457 prev_chunk_ = NULL; | 478 prev_chunk_ = NULL; |
| 458 next_chunk_ = NULL; | 479 next_chunk_ = NULL; |
| 459 } | 480 } |
| 460 | 481 |
| 461 | 482 |
| 462 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, | 483 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, |
| 463 Executability executable, | 484 Executability executable, |
| 464 Space* owner) { | 485 Space* owner) { |
| 465 size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size; | 486 size_t chunk_size; |
| 466 Heap* heap = isolate_->heap(); | 487 Heap* heap = isolate_->heap(); |
| 467 Address base = NULL; | 488 Address base = NULL; |
| 468 VirtualMemory reservation; | 489 VirtualMemory reservation; |
| 490 Address area_start = NULL; |
| 491 Address area_end = NULL; |
| 469 if (executable == EXECUTABLE) { | 492 if (executable == EXECUTABLE) { |
| 493 chunk_size = RoundUp(CodePageAreaStartOffset() + body_size, |
| 494 OS::CommitPageSize()) + CodePageGuardSize(); |
| 495 |
| 470 // Check executable memory limit. | 496 // Check executable memory limit. |
| 471 if (size_executable_ + chunk_size > capacity_executable_) { | 497 if (size_executable_ + chunk_size > capacity_executable_) { |
| 472 LOG(isolate_, | 498 LOG(isolate_, |
| 473 StringEvent("MemoryAllocator::AllocateRawMemory", | 499 StringEvent("MemoryAllocator::AllocateRawMemory", |
| 474 "V8 Executable Allocation capacity exceeded")); | 500 "V8 Executable Allocation capacity exceeded")); |
| 475 return NULL; | 501 return NULL; |
| 476 } | 502 } |
| 477 | 503 |
| 478 // Allocate executable memory either from code range or from the | 504 // Allocate executable memory either from code range or from the |
| 479 // OS. | 505 // OS. |
| 480 if (isolate_->code_range()->exists()) { | 506 if (isolate_->code_range()->exists()) { |
| 481 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); | 507 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); |
| 482 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), | 508 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), |
| 483 MemoryChunk::kAlignment)); | 509 MemoryChunk::kAlignment)); |
| 484 if (base == NULL) return NULL; | 510 if (base == NULL) return NULL; |
| 485 size_ += chunk_size; | 511 size_ += chunk_size; |
| 486 // Update executable memory size. | 512 // Update executable memory size. |
| 487 size_executable_ += chunk_size; | 513 size_executable_ += chunk_size; |
| 488 } else { | 514 } else { |
| 489 base = AllocateAlignedMemory(chunk_size, | 515 base = AllocateAlignedMemory(chunk_size, |
| 490 MemoryChunk::kAlignment, | 516 MemoryChunk::kAlignment, |
| 491 executable, | 517 executable, |
| 492 &reservation); | 518 &reservation); |
| 493 if (base == NULL) return NULL; | 519 if (base == NULL) return NULL; |
| 494 // Update executable memory size. | 520 // Update executable memory size. |
| 495 size_executable_ += reservation.size(); | 521 size_executable_ += reservation.size(); |
| 496 } | 522 } |
| 523 |
| 524 #ifdef DEBUG |
| 525 ZapBlock(base, CodePageGuardStartOffset()); |
| 526 ZapBlock(base + CodePageAreaStartOffset(), body_size); |
| 527 #endif |
| 528 area_start = base + CodePageAreaStartOffset(); |
| 529 area_end = area_start + body_size; |
| 497 } else { | 530 } else { |
| 531 chunk_size = MemoryChunk::kObjectStartOffset + body_size; |
| 498 base = AllocateAlignedMemory(chunk_size, | 532 base = AllocateAlignedMemory(chunk_size, |
| 499 MemoryChunk::kAlignment, | 533 MemoryChunk::kAlignment, |
| 500 executable, | 534 executable, |
| 501 &reservation); | 535 &reservation); |
| 502 | 536 |
| 503 if (base == NULL) return NULL; | 537 if (base == NULL) return NULL; |
| 538 |
| 539 #ifdef DEBUG |
| 540 ZapBlock(base, chunk_size); |
| 541 #endif |
| 542 |
| 543 area_start = base + Page::kObjectStartOffset; |
| 544 area_end = base + chunk_size; |
| 504 } | 545 } |
| 505 | 546 |
| 506 #ifdef DEBUG | |
| 507 ZapBlock(base, chunk_size); | |
| 508 #endif | |
| 509 isolate_->counters()->memory_allocated()-> | 547 isolate_->counters()->memory_allocated()-> |
| 510 Increment(static_cast<int>(chunk_size)); | 548 Increment(static_cast<int>(chunk_size)); |
| 511 | 549 |
| 512 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); | 550 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); |
| 513 if (owner != NULL) { | 551 if (owner != NULL) { |
| 514 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); | 552 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); |
| 515 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); | 553 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); |
| 516 } | 554 } |
| 517 | 555 |
| 518 MemoryChunk* result = MemoryChunk::Initialize(heap, | 556 MemoryChunk* result = MemoryChunk::Initialize(heap, |
| 519 base, | 557 base, |
| 520 chunk_size, | 558 chunk_size, |
| 559 area_start, |
| 560 area_end, |
| 521 executable, | 561 executable, |
| 522 owner); | 562 owner); |
| 523 result->set_reserved_memory(&reservation); | 563 result->set_reserved_memory(&reservation); |
| 524 return result; | 564 return result; |
| 525 } | 565 } |
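For readers skimming the new sizing logic in AllocateChunk: executable chunks now reserve room for the page header, a leading guard page, the rounded-up body, and a trailing guard page, while non-executable chunks keep the old header-plus-body size. A hedged sketch of the two formulas, where the parameter names stand in for OS::CommitPageSize(), CodePageAreaStartOffset(), CodePageGuardSize() and MemoryChunk::kObjectStartOffset (none of these stand-ins are real V8 API):

```cpp
#include <cstddef>

size_t RoundUpTo(size_t value, size_t granularity) {
  return ((value + granularity - 1) / granularity) * granularity;
}

// Executable chunks: header plus leading guard page (together
// CodePageAreaStartOffset()) plus body, rounded up to a commit page,
// plus the trailing guard page.
size_t ExecutableChunkSize(size_t body_size, size_t area_start_offset,
                           size_t guard_size, size_t commit_page_size) {
  return RoundUpTo(area_start_offset + body_size, commit_page_size) +
         guard_size;
}

// Non-executable chunks keep the old layout: just the header in front of
// the object area.
size_t DataChunkSize(size_t body_size, size_t object_start_offset) {
  return object_start_offset + body_size;
}
```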
| 526 | 566 |
| 527 | 567 |
| 528 Page* MemoryAllocator::AllocatePage(PagedSpace* owner, | 568 Page* MemoryAllocator::AllocatePage(PagedSpace* owner, |
| 529 Executability executable) { | 569 Executability executable) { |
| 530 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner); | 570 MemoryChunk* chunk = AllocateChunk(owner->AreaSize(), |
| 571 executable, |
| 572 owner); |
| 531 | 573 |
| 532 if (chunk == NULL) return NULL; | 574 if (chunk == NULL) return NULL; |
| 533 | 575 |
| 534 return Page::Initialize(isolate_->heap(), chunk, executable, owner); | 576 return Page::Initialize(isolate_->heap(), chunk, executable, owner); |
| 535 } | 577 } |
| 536 | 578 |
| 537 | 579 |
| 538 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, | 580 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, |
| 539 Executability executable, | 581 Executability executable, |
| 540 Space* owner) { | 582 Space* owner) { |
| (...skipping 100 matching lines...) |
| 641 #ifdef DEBUG | 683 #ifdef DEBUG |
| 642 void MemoryAllocator::ReportStatistics() { | 684 void MemoryAllocator::ReportStatistics() { |
| 643 float pct = static_cast<float>(capacity_ - size_) / capacity_; | 685 float pct = static_cast<float>(capacity_ - size_) / capacity_; |
| 644 PrintF(" capacity: %" V8_PTR_PREFIX "d" | 686 PrintF(" capacity: %" V8_PTR_PREFIX "d" |
| 645 ", used: %" V8_PTR_PREFIX "d" | 687 ", used: %" V8_PTR_PREFIX "d" |
| 646 ", available: %%%d\n\n", | 688 ", available: %%%d\n\n", |
| 647 capacity_, size_, static_cast<int>(pct*100)); | 689 capacity_, size_, static_cast<int>(pct*100)); |
| 648 } | 690 } |
| 649 #endif | 691 #endif |
| 650 | 692 |
| 693 |
| 694 int MemoryAllocator::CodePageGuardStartOffset() { |
| 695 // We are guarding code pages: the first OS page after the header |
| 696 // will be protected as non-writable. |
| 697 return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize()); |
| 698 } |
| 699 |
| 700 |
| 701 int MemoryAllocator::CodePageGuardSize() { |
| 702 return OS::CommitPageSize(); |
| 703 } |
| 704 |
| 705 |
| 706 int MemoryAllocator::CodePageAreaStartOffset() { |
| 707 // We are guarding code pages: the first OS page after the header |
| 708 // will be protected as non-writable. |
| 709 return CodePageGuardStartOffset() + CodePageGuardSize(); |
| 710 } |
| 711 |
| 712 |
| 713 int MemoryAllocator::CodePageAreaEndOffset() { |
| 714 // We are guarding code pages: the last OS page will be protected as |
| 715 // non-writable. |
| 716 return Page::kPageSize - OS::CommitPageSize(); |
| 717 } |
| 718 |
| 719 |
| 720 bool MemoryAllocator::CommitCodePage(VirtualMemory* vm, |
| 721 Address start, |
| 722 size_t size) { |
| 723 // Commit page header (not executable). |
| 724 if (!vm->Commit(start, |
| 725 CodePageGuardStartOffset(), |
| 726 false)) { |
| 727 return false; |
| 728 } |
| 729 |
| 730 // Create guard page after the header. |
| 731 if (!vm->Guard(start + CodePageGuardStartOffset())) { |
| 732 return false; |
| 733 } |
| 734 |
| 735 // Commit page body (executable). |
| 736 size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize(); |
| 737 if (!vm->Commit(start + CodePageAreaStartOffset(), |
| 738 area_size, |
| 739 true)) { |
| 740 return false; |
| 741 } |
| 742 |
| 743 // Create guard page after the allocatable area. |
| 744 if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) { |
| 745 return false; |
| 746 } |
| 747 |
| 748 return true; |
| 749 } |
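To summarize the layout that CommitCodePage() establishes: the ordering below is taken from the code above, but the byte counts depend on OS::CommitPageSize() and Page::kObjectStartOffset, so they are left symbolic rather than stated as facts.

```cpp
// Rough picture of a committed code page (sketch, not part of the patch):
//
//   start
//   +-------------------------+  committed, not executable
//   | page header             |  CodePageGuardStartOffset() bytes
//   +-------------------------+  guarded (no access)
//   | guard page              |  CodePageGuardSize() bytes
//   +-------------------------+  committed, executable
//   | object area             |  size - CodePageAreaStartOffset()
//   |                         |       - CodePageGuardSize() bytes
//   +-------------------------+  guarded (no access)
//   | guard page              |  last OS page of the chunk
//   +-------------------------+
//   start + size
```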
| 750 |
| 751 |
| 651 // ----------------------------------------------------------------------------- | 752 // ----------------------------------------------------------------------------- |
| 652 // PagedSpace implementation | 753 // PagedSpace implementation |
| 653 | 754 |
| 654 PagedSpace::PagedSpace(Heap* heap, | 755 PagedSpace::PagedSpace(Heap* heap, |
| 655 intptr_t max_capacity, | 756 intptr_t max_capacity, |
| 656 AllocationSpace id, | 757 AllocationSpace id, |
| 657 Executability executable) | 758 Executability executable) |
| 658 : Space(heap, id, executable), | 759 : Space(heap, id, executable), |
| 659 free_list_(this), | 760 free_list_(this), |
| 660 was_swept_conservatively_(false), | 761 was_swept_conservatively_(false), |
| 661 first_unswept_page_(Page::FromAddress(NULL)) { | 762 first_unswept_page_(Page::FromAddress(NULL)) { |
| 763 if (id == CODE_SPACE) { |
| 764 area_size_ = heap->isolate()->memory_allocator()-> |
| 765 CodePageAreaSize(); |
| 766 } else { |
| 767 area_size_ = Page::kPageSize - Page::kObjectStartOffset; |
| 768 } |
| 662 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) | 769 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) |
| 663 * Page::kObjectAreaSize; | 770 * AreaSize(); |
| 664 accounting_stats_.Clear(); | 771 accounting_stats_.Clear(); |
| 665 | 772 |
| 666 allocation_info_.top = NULL; | 773 allocation_info_.top = NULL; |
| 667 allocation_info_.limit = NULL; | 774 allocation_info_.limit = NULL; |
| 668 | 775 |
| 669 anchor_.InitializeAsAnchor(this); | 776 anchor_.InitializeAsAnchor(this); |
| 670 } | 777 } |
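The area_size_ / max_capacity_ arithmetic in the constructor above can be sanity-checked with made-up constants. This standalone snippet is only an illustration under assumed values, not code from the patch; the real constants are Page::kPageSize, Page::kObjectStartOffset and the code-page area size.

```cpp
#include <cstdio>

int main() {
  const long kPageSize = 1L << 20;            // hypothetical 1 MB page
  const long kObjectStartOffset = 32 * 1024;  // hypothetical header size
  const long area_size = kPageSize - kObjectStartOffset;  // non-code spaces

  long max_capacity = 40 * kPageSize + 12345;  // arbitrary requested capacity
  long pages = max_capacity / kPageSize;       // RoundDown(...) / kPageSize
  long usable_capacity = pages * area_size;    // value stored in max_capacity_

  std::printf("%ld pages, %ld usable bytes\n", pages, usable_capacity);
  return 0;
}
```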
| 671 | 778 |
| 672 | 779 |
| 673 bool PagedSpace::Setup() { | 780 bool PagedSpace::Setup() { |
| (...skipping 29 matching lines...) |
| 703 Address cur = obj->address(); | 810 Address cur = obj->address(); |
| 704 Address next = cur + obj->Size(); | 811 Address next = cur + obj->Size(); |
| 705 if ((cur <= addr) && (addr < next)) return obj; | 812 if ((cur <= addr) && (addr < next)) return obj; |
| 706 } | 813 } |
| 707 | 814 |
| 708 UNREACHABLE(); | 815 UNREACHABLE(); |
| 709 return Failure::Exception(); | 816 return Failure::Exception(); |
| 710 } | 817 } |
| 711 | 818 |
| 712 bool PagedSpace::CanExpand() { | 819 bool PagedSpace::CanExpand() { |
| 713 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); | 820 ASSERT(max_capacity_ % AreaSize() == 0); |
| 714 ASSERT(Capacity() % Page::kObjectAreaSize == 0); | 821 ASSERT(Capacity() % AreaSize() == 0); |
| 715 | 822 |
| 716 if (Capacity() == max_capacity_) return false; | 823 if (Capacity() == max_capacity_) return false; |
| 717 | 824 |
| 718 ASSERT(Capacity() < max_capacity_); | 825 ASSERT(Capacity() < max_capacity_); |
| 719 | 826 |
| 720 // Are we going to exceed capacity for this space? | 827 // Are we going to exceed capacity for this space? |
| 721 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; | 828 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; |
| 722 | 829 |
| 723 return true; | 830 return true; |
| 724 } | 831 } |
| (...skipping 19 matching lines...) |
| 744 while (it.has_next()) { | 851 while (it.has_next()) { |
| 745 it.next(); | 852 it.next(); |
| 746 count++; | 853 count++; |
| 747 } | 854 } |
| 748 return count; | 855 return count; |
| 749 } | 856 } |
| 750 | 857 |
| 751 | 858 |
| 752 void PagedSpace::ReleasePage(Page* page) { | 859 void PagedSpace::ReleasePage(Page* page) { |
| 753 ASSERT(page->LiveBytes() == 0); | 860 ASSERT(page->LiveBytes() == 0); |
| 861 ASSERT(AreaSize() == page->area_size()); |
| 754 | 862 |
| 755 // Adjust list of unswept pages if the page is the head of the list. | 863 // Adjust list of unswept pages if the page is the head of the list. |
| 756 if (first_unswept_page_ == page) { | 864 if (first_unswept_page_ == page) { |
| 757 first_unswept_page_ = page->next_page(); | 865 first_unswept_page_ = page->next_page(); |
| 758 if (first_unswept_page_ == anchor()) { | 866 if (first_unswept_page_ == anchor()) { |
| 759 first_unswept_page_ = Page::FromAddress(NULL); | 867 first_unswept_page_ = Page::FromAddress(NULL); |
| 760 } | 868 } |
| 761 } | 869 } |
| 762 | 870 |
| 763 if (page->WasSwept()) { | 871 if (page->WasSwept()) { |
| 764 intptr_t size = free_list_.EvictFreeListItems(page); | 872 intptr_t size = free_list_.EvictFreeListItems(page); |
| 765 accounting_stats_.AllocateBytes(size); | 873 accounting_stats_.AllocateBytes(size); |
| 766 ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size)); | 874 ASSERT_EQ(AreaSize(), static_cast<int>(size)); |
| 767 } | 875 } |
| 768 | 876 |
| 769 if (Page::FromAllocationTop(allocation_info_.top) == page) { | 877 if (Page::FromAllocationTop(allocation_info_.top) == page) { |
| 770 allocation_info_.top = allocation_info_.limit = NULL; | 878 allocation_info_.top = allocation_info_.limit = NULL; |
| 771 } | 879 } |
| 772 | 880 |
| 773 page->Unlink(); | 881 page->Unlink(); |
| 774 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { | 882 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { |
| 775 heap()->isolate()->memory_allocator()->Free(page); | 883 heap()->isolate()->memory_allocator()->Free(page); |
| 776 } else { | 884 } else { |
| 777 heap()->QueueMemoryChunkForFree(page); | 885 heap()->QueueMemoryChunkForFree(page); |
| 778 } | 886 } |
| 779 | 887 |
| 780 ASSERT(Capacity() > 0); | 888 ASSERT(Capacity() > 0); |
| 781 ASSERT(Capacity() % Page::kObjectAreaSize == 0); | 889 ASSERT(Capacity() % AreaSize() == 0); |
| 782 accounting_stats_.ShrinkSpace(Page::kObjectAreaSize); | 890 accounting_stats_.ShrinkSpace(AreaSize()); |
| 783 } | 891 } |
| 784 | 892 |
| 785 | 893 |
| 786 void PagedSpace::ReleaseAllUnusedPages() { | 894 void PagedSpace::ReleaseAllUnusedPages() { |
| 787 PageIterator it(this); | 895 PageIterator it(this); |
| 788 while (it.has_next()) { | 896 while (it.has_next()) { |
| 789 Page* page = it.next(); | 897 Page* page = it.next(); |
| 790 if (!page->WasSwept()) { | 898 if (!page->WasSwept()) { |
| 791 if (page->LiveBytes() == 0) ReleasePage(page); | 899 if (page->LiveBytes() == 0) ReleasePage(page); |
| 792 } else { | 900 } else { |
| 793 HeapObject* obj = HeapObject::FromAddress(page->body()); | 901 HeapObject* obj = HeapObject::FromAddress(page->area_start()); |
| 794 if (obj->IsFreeSpace() && | 902 if (obj->IsFreeSpace() && |
| 795 FreeSpace::cast(obj)->size() == Page::kObjectAreaSize) { | 903 FreeSpace::cast(obj)->size() == AreaSize()) { |
| 796 // Sometimes we allocate memory from free list but don't | 904 // Sometimes we allocate memory from free list but don't |
| 797 // immediately initialize it (e.g. see PagedSpace::ReserveSpace | 905 // immediately initialize it (e.g. see PagedSpace::ReserveSpace |
| 798 // called from Heap::ReserveSpace that can cause GC before | 906 // called from Heap::ReserveSpace that can cause GC before |
| 799 // reserved space is actually initialized). | 907 // reserved space is actually initialized). |
| 800 // Thus we can't simply assume that obj represents a valid | 908 // Thus we can't simply assume that obj represents a valid |
| 801 // node still owned by a free list | 909 // node still owned by a free list |
| 802 // Instead we should verify that the page is fully covered | 910 // Instead we should verify that the page is fully covered |
| 803 // by free list items. | 911 // by free list items. |
| 804 FreeList::SizeStats sizes; | 912 FreeList::SizeStats sizes; |
| 805 free_list_.CountFreeListItems(page, &sizes); | 913 free_list_.CountFreeListItems(page, &sizes); |
| 806 if (sizes.Total() == Page::kObjectAreaSize) { | 914 if (sizes.Total() == AreaSize()) { |
| 807 ReleasePage(page); | 915 ReleasePage(page); |
| 808 } | 916 } |
| 809 } | 917 } |
| 810 } | 918 } |
| 811 } | 919 } |
| 812 heap()->FreeQueuedChunks(); | 920 heap()->FreeQueuedChunks(); |
| 813 } | 921 } |
| 814 | 922 |
| 815 | 923 |
| 816 #ifdef DEBUG | 924 #ifdef DEBUG |
| (...skipping 10 matching lines...) |
| 827 (allocation_info_.top == allocation_info_.limit); | 935 (allocation_info_.top == allocation_info_.limit); |
| 828 PageIterator page_iterator(this); | 936 PageIterator page_iterator(this); |
| 829 while (page_iterator.has_next()) { | 937 while (page_iterator.has_next()) { |
| 830 Page* page = page_iterator.next(); | 938 Page* page = page_iterator.next(); |
| 831 ASSERT(page->owner() == this); | 939 ASSERT(page->owner() == this); |
| 832 if (page == Page::FromAllocationTop(allocation_info_.top)) { | 940 if (page == Page::FromAllocationTop(allocation_info_.top)) { |
| 833 allocation_pointer_found_in_space = true; | 941 allocation_pointer_found_in_space = true; |
| 834 } | 942 } |
| 835 ASSERT(page->WasSweptPrecisely()); | 943 ASSERT(page->WasSweptPrecisely()); |
| 836 HeapObjectIterator it(page, NULL); | 944 HeapObjectIterator it(page, NULL); |
| 837 Address end_of_previous_object = page->ObjectAreaStart(); | 945 Address end_of_previous_object = page->area_start(); |
| 838 Address top = page->ObjectAreaEnd(); | 946 Address top = page->area_end(); |
| 839 int black_size = 0; | 947 int black_size = 0; |
| 840 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { | 948 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { |
| 841 ASSERT(end_of_previous_object <= object->address()); | 949 ASSERT(end_of_previous_object <= object->address()); |
| 842 | 950 |
| 843 // The first word should be a map, and we expect all map pointers to | 951 // The first word should be a map, and we expect all map pointers to |
| 844 // be in map space. | 952 // be in map space. |
| 845 Map* map = object->map(); | 953 Map* map = object->map(); |
| 846 ASSERT(map->IsMap()); | 954 ASSERT(map->IsMap()); |
| 847 ASSERT(heap()->map_space()->Contains(map)); | 955 ASSERT(heap()->map_space()->Contains(map)); |
| 848 | 956 |
| (...skipping 192 matching lines...) |
| 1041 // TODO(gc): Change the limit on new-space allocation to prevent this | 1149 // TODO(gc): Change the limit on new-space allocation to prevent this |
| 1042 // from happening (all such allocations should go directly to LOSpace). | 1150 // from happening (all such allocations should go directly to LOSpace). |
| 1043 return false; | 1151 return false; |
| 1044 } | 1152 } |
| 1045 if (!to_space_.AdvancePage()) { | 1153 if (!to_space_.AdvancePage()) { |
| 1046 // Failed to get a new page in to-space. | 1154 // Failed to get a new page in to-space. |
| 1047 return false; | 1155 return false; |
| 1048 } | 1156 } |
| 1049 | 1157 |
| 1050 // Clear remainder of current page. | 1158 // Clear remainder of current page. |
| 1051 Address limit = NewSpacePage::FromLimit(top)->body_limit(); | 1159 Address limit = NewSpacePage::FromLimit(top)->area_end(); |
| 1052 if (heap()->gc_state() == Heap::SCAVENGE) { | 1160 if (heap()->gc_state() == Heap::SCAVENGE) { |
| 1053 heap()->promotion_queue()->SetNewLimit(limit); | 1161 heap()->promotion_queue()->SetNewLimit(limit); |
| 1054 heap()->promotion_queue()->ActivateGuardIfOnTheSamePage(); | 1162 heap()->promotion_queue()->ActivateGuardIfOnTheSamePage(); |
| 1055 } | 1163 } |
| 1056 | 1164 |
| 1057 int remaining_in_page = static_cast<int>(limit - top); | 1165 int remaining_in_page = static_cast<int>(limit - top); |
| 1058 heap()->CreateFillerObjectAt(top, remaining_in_page); | 1166 heap()->CreateFillerObjectAt(top, remaining_in_page); |
| 1059 pages_used_++; | 1167 pages_used_++; |
| 1060 UpdateAllocationInfo(); | 1168 UpdateAllocationInfo(); |
| 1061 | 1169 |
| (...skipping 29 matching lines...) |
| 1091 | 1199 |
| 1092 #ifdef DEBUG | 1200 #ifdef DEBUG |
| 1093 // We do not use the SemiSpaceIterator because verification doesn't assume | 1201 // We do not use the SemiSpaceIterator because verification doesn't assume |
| 1094 // that it works (it depends on the invariants we are checking). | 1202 // that it works (it depends on the invariants we are checking). |
| 1095 void NewSpace::Verify() { | 1203 void NewSpace::Verify() { |
| 1096 // The allocation pointer should be in the space or at the very end. | 1204 // The allocation pointer should be in the space or at the very end. |
| 1097 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | 1205 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1098 | 1206 |
| 1099 // There should be objects packed in from the low address up to the | 1207 // There should be objects packed in from the low address up to the |
| 1100 // allocation pointer. | 1208 // allocation pointer. |
| 1101 Address current = to_space_.first_page()->body(); | 1209 Address current = to_space_.first_page()->area_start(); |
| 1102 CHECK_EQ(current, to_space_.space_start()); | 1210 CHECK_EQ(current, to_space_.space_start()); |
| 1103 | 1211 |
| 1104 while (current != top()) { | 1212 while (current != top()) { |
| 1105 if (!NewSpacePage::IsAtEnd(current)) { | 1213 if (!NewSpacePage::IsAtEnd(current)) { |
| 1106 // The allocation pointer should not be in the middle of an object. | 1214 // The allocation pointer should not be in the middle of an object. |
| 1107 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) || | 1215 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) || |
| 1108 current < top()); | 1216 current < top()); |
| 1109 | 1217 |
| 1110 HeapObject* object = HeapObject::FromAddress(current); | 1218 HeapObject* object = HeapObject::FromAddress(current); |
| 1111 | 1219 |
| (...skipping 14 matching lines...) |
| 1126 VerifyPointersVisitor visitor; | 1234 VerifyPointersVisitor visitor; |
| 1127 int size = object->Size(); | 1235 int size = object->Size(); |
| 1128 object->IterateBody(map->instance_type(), size, &visitor); | 1236 object->IterateBody(map->instance_type(), size, &visitor); |
| 1129 | 1237 |
| 1130 current += size; | 1238 current += size; |
| 1131 } else { | 1239 } else { |
| 1132 // At end of page, switch to next page. | 1240 // At end of page, switch to next page. |
| 1133 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page(); | 1241 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page(); |
| 1134 // Next page should be valid. | 1242 // Next page should be valid. |
| 1135 CHECK(!page->is_anchor()); | 1243 CHECK(!page->is_anchor()); |
| 1136 current = page->body(); | 1244 current = page->area_start(); |
| 1137 } | 1245 } |
| 1138 } | 1246 } |
| 1139 | 1247 |
| 1140 // Check semi-spaces. | 1248 // Check semi-spaces. |
| 1141 ASSERT_EQ(from_space_.id(), kFromSpace); | 1249 ASSERT_EQ(from_space_.id(), kFromSpace); |
| 1142 ASSERT_EQ(to_space_.id(), kToSpace); | 1250 ASSERT_EQ(to_space_.id(), kToSpace); |
| 1143 from_space_.Verify(); | 1251 from_space_.Verify(); |
| 1144 to_space_.Verify(); | 1252 to_space_.Verify(); |
| 1145 } | 1253 } |
| 1146 #endif | 1254 #endif |
| (...skipping 759 matching lines...) |
| 1906 sum += free_space->Size(); | 2014 sum += free_space->Size(); |
| 1907 } | 2015 } |
| 1908 n = n->next(); | 2016 n = n->next(); |
| 1909 } | 2017 } |
| 1910 return sum; | 2018 return sum; |
| 1911 } | 2019 } |
| 1912 | 2020 |
| 1913 | 2021 |
| 1914 void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) { | 2022 void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) { |
| 1915 sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p); | 2023 sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p); |
| 1916 if (sizes->huge_size_ < Page::kObjectAreaSize) { | 2024 if (sizes->huge_size_ < p->area_size()) { |
| 1917 sizes->small_size_ = CountFreeListItemsInList(small_list_, p); | 2025 sizes->small_size_ = CountFreeListItemsInList(small_list_, p); |
| 1918 sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p); | 2026 sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p); |
| 1919 sizes->large_size_ = CountFreeListItemsInList(large_list_, p); | 2027 sizes->large_size_ = CountFreeListItemsInList(large_list_, p); |
| 1920 } else { | 2028 } else { |
| 1921 sizes->small_size_ = 0; | 2029 sizes->small_size_ = 0; |
| 1922 sizes->medium_size_ = 0; | 2030 sizes->medium_size_ = 0; |
| 1923 sizes->large_size_ = 0; | 2031 sizes->large_size_ = 0; |
| 1924 } | 2032 } |
| 1925 } | 2033 } |
| 1926 | 2034 |
| 1927 | 2035 |
| 1928 static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) { | 2036 static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) { |
| 1929 intptr_t sum = 0; | 2037 intptr_t sum = 0; |
| 1930 while (*n != NULL) { | 2038 while (*n != NULL) { |
| 1931 if (Page::FromAddress((*n)->address()) == p) { | 2039 if (Page::FromAddress((*n)->address()) == p) { |
| 1932 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n); | 2040 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n); |
| 1933 sum += free_space->Size(); | 2041 sum += free_space->Size(); |
| 1934 *n = (*n)->next(); | 2042 *n = (*n)->next(); |
| 1935 } else { | 2043 } else { |
| 1936 n = (*n)->next_address(); | 2044 n = (*n)->next_address(); |
| 1937 } | 2045 } |
| 1938 } | 2046 } |
| 1939 return sum; | 2047 return sum; |
| 1940 } | 2048 } |
| 1941 | 2049 |
| 1942 | 2050 |
| 1943 intptr_t FreeList::EvictFreeListItems(Page* p) { | 2051 intptr_t FreeList::EvictFreeListItems(Page* p) { |
| 1944 intptr_t sum = EvictFreeListItemsInList(&huge_list_, p); | 2052 intptr_t sum = EvictFreeListItemsInList(&huge_list_, p); |
| 1945 | 2053 |
| 1946 if (sum < Page::kObjectAreaSize) { | 2054 if (sum < p->area_size()) { |
| 1947 sum += EvictFreeListItemsInList(&small_list_, p) + | 2055 sum += EvictFreeListItemsInList(&small_list_, p) + |
| 1948 EvictFreeListItemsInList(&medium_list_, p) + | 2056 EvictFreeListItemsInList(&medium_list_, p) + |
| 1949 EvictFreeListItemsInList(&large_list_, p); | 2057 EvictFreeListItemsInList(&large_list_, p); |
| 1950 } | 2058 } |
| 1951 | 2059 |
| 1952 available_ -= static_cast<int>(sum); | 2060 available_ -= static_cast<int>(sum); |
| 1953 | 2061 |
| 1954 return sum; | 2062 return sum; |
| 1955 } | 2063 } |
| 1956 | 2064 |
| (...skipping 100 matching lines...) |
| 2057 } while (p != anchor()); | 2165 } while (p != anchor()); |
| 2058 } | 2166 } |
| 2059 first_unswept_page_ = Page::FromAddress(NULL); | 2167 first_unswept_page_ = Page::FromAddress(NULL); |
| 2060 | 2168 |
| 2061 // Clear the free list before a full GC---it will be rebuilt afterward. | 2169 // Clear the free list before a full GC---it will be rebuilt afterward. |
| 2062 free_list_.Reset(); | 2170 free_list_.Reset(); |
| 2063 } | 2171 } |
| 2064 | 2172 |
| 2065 | 2173 |
| 2066 bool PagedSpace::ReserveSpace(int size_in_bytes) { | 2174 bool PagedSpace::ReserveSpace(int size_in_bytes) { |
| 2067 ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize); | 2175 ASSERT(size_in_bytes <= AreaSize()); |
| 2068 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes)); | 2176 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes)); |
| 2069 Address current_top = allocation_info_.top; | 2177 Address current_top = allocation_info_.top; |
| 2070 Address new_top = current_top + size_in_bytes; | 2178 Address new_top = current_top + size_in_bytes; |
| 2071 if (new_top <= allocation_info_.limit) return true; | 2179 if (new_top <= allocation_info_.limit) return true; |
| 2072 | 2180 |
| 2073 HeapObject* new_area = free_list_.Allocate(size_in_bytes); | 2181 HeapObject* new_area = free_list_.Allocate(size_in_bytes); |
| 2074 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); | 2182 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); |
| 2075 if (new_area == NULL) return false; | 2183 if (new_area == NULL) return false; |
| 2076 | 2184 |
| 2077 int old_linear_size = static_cast<int>(limit() - top()); | 2185 int old_linear_size = static_cast<int>(limit() - top()); |
| (...skipping 355 matching lines...) |
| 2433 return Failure::RetryAfterGC(identity()); | 2541 return Failure::RetryAfterGC(identity()); |
| 2434 } | 2542 } |
| 2435 | 2543 |
| 2436 if (Size() + object_size > max_capacity_) { | 2544 if (Size() + object_size > max_capacity_) { |
| 2437 return Failure::RetryAfterGC(identity()); | 2545 return Failure::RetryAfterGC(identity()); |
| 2438 } | 2546 } |
| 2439 | 2547 |
| 2440 LargePage* page = heap()->isolate()->memory_allocator()-> | 2548 LargePage* page = heap()->isolate()->memory_allocator()-> |
| 2441 AllocateLargePage(object_size, executable, this); | 2549 AllocateLargePage(object_size, executable, this); |
| 2442 if (page == NULL) return Failure::RetryAfterGC(identity()); | 2550 if (page == NULL) return Failure::RetryAfterGC(identity()); |
| 2443 ASSERT(page->body_size() >= object_size); | 2551 ASSERT(page->area_size() >= object_size); |
| 2444 | 2552 |
| 2445 size_ += static_cast<int>(page->size()); | 2553 size_ += static_cast<int>(page->size()); |
| 2446 objects_size_ += object_size; | 2554 objects_size_ += object_size; |
| 2447 page_count_++; | 2555 page_count_++; |
| 2448 page->set_next_page(first_page_); | 2556 page->set_next_page(first_page_); |
| 2449 first_page_ = page; | 2557 first_page_ = page; |
| 2450 | 2558 |
| 2451 HeapObject* object = page->GetObject(); | 2559 HeapObject* object = page->GetObject(); |
| 2452 | 2560 |
| 2453 #ifdef DEBUG | 2561 #ifdef DEBUG |
| (...skipping 95 matching lines...) |
| 2549 // We do not assume that the large object iterator works, because it depends | 2657 // We do not assume that the large object iterator works, because it depends |
| 2550 // on the invariants we are checking during verification. | 2658 // on the invariants we are checking during verification. |
| 2551 void LargeObjectSpace::Verify() { | 2659 void LargeObjectSpace::Verify() { |
| 2552 for (LargePage* chunk = first_page_; | 2660 for (LargePage* chunk = first_page_; |
| 2553 chunk != NULL; | 2661 chunk != NULL; |
| 2554 chunk = chunk->next_page()) { | 2662 chunk = chunk->next_page()) { |
| 2555 // Each chunk contains an object that starts at the large object page's | 2663 // Each chunk contains an object that starts at the large object page's |
| 2556 // object area start. | 2664 // object area start. |
| 2557 HeapObject* object = chunk->GetObject(); | 2665 HeapObject* object = chunk->GetObject(); |
| 2558 Page* page = Page::FromAddress(object->address()); | 2666 Page* page = Page::FromAddress(object->address()); |
| 2559 ASSERT(object->address() == page->ObjectAreaStart()); | 2667 ASSERT(object->address() == page->area_start()); |
| 2560 | 2668 |
| 2561 // The first word should be a map, and we expect all map pointers to be | 2669 // The first word should be a map, and we expect all map pointers to be |
| 2562 // in map space. | 2670 // in map space. |
| 2563 Map* map = object->map(); | 2671 Map* map = object->map(); |
| 2564 ASSERT(map->IsMap()); | 2672 ASSERT(map->IsMap()); |
| 2565 ASSERT(heap()->map_space()->Contains(map)); | 2673 ASSERT(heap()->map_space()->Contains(map)); |
| 2566 | 2674 |
| 2567 // We have only code, sequential strings, external strings | 2675 // We have only code, sequential strings, external strings |
| 2568 // (sequential strings that have been morphed into external | 2676 // (sequential strings that have been morphed into external |
| 2569 // strings), fixed arrays, and byte arrays in large object space. | 2677 // strings), fixed arrays, and byte arrays in large object space. |
| (...skipping 80 matching lines...) |
| 2650 object->ShortPrint(); | 2758 object->ShortPrint(); |
| 2651 PrintF("\n"); | 2759 PrintF("\n"); |
| 2652 } | 2760 } |
| 2653 printf(" --------------------------------------\n"); | 2761 printf(" --------------------------------------\n"); |
| 2654 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 2762 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 2655 } | 2763 } |
| 2656 | 2764 |
| 2657 #endif // DEBUG | 2765 #endif // DEBUG |
| 2658 | 2766 |
| 2659 } } // namespace v8::internal | 2767 } } // namespace v8::internal |