Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 50 matching lines...) | |
| 61 NULL, | 61 NULL, |
| 62 NULL, | 62 NULL, |
| 63 kAllPagesInSpace, | 63 kAllPagesInSpace, |
| 64 size_func); | 64 size_func); |
| 65 } | 65 } |
| 66 | 66 |
| 67 | 67 |
| 68 HeapObjectIterator::HeapObjectIterator(Page* page, | 68 HeapObjectIterator::HeapObjectIterator(Page* page, |
| 69 HeapObjectCallback size_func) { | 69 HeapObjectCallback size_func) { |
| 70 Space* owner = page->owner(); | 70 Space* owner = page->owner(); |
| 71 ASSERT(owner == Heap::old_pointer_space() || | 71 ASSERT(owner == HEAP->old_pointer_space() || |
| 72 owner == Heap::old_data_space() || | 72 owner == HEAP->old_data_space() || |
| 73 owner == Heap::map_space() || | 73 owner == HEAP->map_space() || |
| 74 owner == Heap::cell_space() || | 74 owner == HEAP->cell_space() || |
| 75 owner == Heap::code_space()); | 75 owner == HEAP->code_space()); |
| 76 Initialize(reinterpret_cast<PagedSpace*>(owner), | 76 Initialize(reinterpret_cast<PagedSpace*>(owner), |
| 77 page->ObjectAreaStart(), | 77 page->ObjectAreaStart(), |
| 78 page->ObjectAreaEnd(), | 78 page->ObjectAreaEnd(), |
| 79 kOnePageOnly, | 79 kOnePageOnly, |
| 80 size_func); | 80 size_func); |
| 81 ASSERT(!page->IsFlagSet(Page::WAS_SWEPT_CONSERVATIVELY)); | 81 ASSERT(!page->IsFlagSet(Page::WAS_SWEPT_CONSERVATIVELY)); |
| 82 } | 82 } |
| 83 | 83 |
| 84 | 84 |
| 85 void HeapObjectIterator::Initialize(PagedSpace* space, | 85 void HeapObjectIterator::Initialize(PagedSpace* space, |
| (...skipping 39 matching lines...) | |
| 125 #ifdef DEBUG | 125 #ifdef DEBUG |
| 126 void HeapObjectIterator::Verify() { | 126 void HeapObjectIterator::Verify() { |
| 127 // TODO(gc): We should do something here. | 127 // TODO(gc): We should do something here. |
| 128 } | 128 } |
| 129 #endif | 129 #endif |
| 130 | 130 |
| 131 | 131 |
| 132 // ----------------------------------------------------------------------------- | 132 // ----------------------------------------------------------------------------- |
| 133 // CodeRange | 133 // CodeRange |
| 134 | 134 |
| 135 List<CodeRange::FreeBlock> CodeRange::free_list_(0); | 135 |
| 136 List<CodeRange::FreeBlock> CodeRange::allocation_list_(0); | 136 CodeRange::CodeRange() |
| 137 int CodeRange::current_allocation_block_index_ = 0; | 137 : code_range_(NULL), |
| 138 VirtualMemory* CodeRange::code_range_ = NULL; | 138 free_list_(0), |
| | 139 allocation_list_(0), |
| | 140 current_allocation_block_index_(0), |
| | 141 isolate_(NULL) { |
| | 142 } |
| 139 | 143 |
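
The block above is the heart of this change: CodeRange's process-wide static members (OLD lines 135-138) become per-instance fields initialized by a constructor, so each isolate can own its own code range. A minimal sketch of that migration pattern, under invented names (GlobalCodeRange, IsolateCodeRange, and their members are illustrative stand-ins, not V8 declarations):

```cpp
// Sketch of the static-to-instance migration applied to CodeRange above.
#include <cstddef>

class VirtualMemory;  // stand-in forward declaration
class Isolate;        // stand-in forward declaration

// Before: one copy of the state per process, reached through static methods.
class GlobalCodeRange {
 public:
  static bool Setup(size_t requested);
 private:
  static VirtualMemory* code_range_;           // shared by every VM in the process
  static int current_allocation_block_index_;
};

// After: the same state, one copy per isolate, initialized in a constructor.
class IsolateCodeRange {
 public:
  IsolateCodeRange()
      : code_range_(NULL),
        current_allocation_block_index_(0),
        isolate_(NULL) {}
  bool Setup(size_t requested);
 private:
  VirtualMemory* code_range_;                  // owned by this isolate
  int current_allocation_block_index_;
  Isolate* isolate_;                           // back pointer, e.g. for LOG()
};
```
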
| 140 | 144 |
| 141 bool CodeRange::Setup(const size_t requested) { | 145 bool CodeRange::Setup(const size_t requested) { |
| 142 ASSERT(code_range_ == NULL); | 146 ASSERT(code_range_ == NULL); |
| 143 | 147 |
| 144 code_range_ = new VirtualMemory(requested); | 148 code_range_ = new VirtualMemory(requested); |
| 145 CHECK(code_range_ != NULL); | 149 CHECK(code_range_ != NULL); |
| 146 if (!code_range_->IsReserved()) { | 150 if (!code_range_->IsReserved()) { |
| 147 delete code_range_; | 151 delete code_range_; |
| 148 code_range_ = NULL; | 152 code_range_ = NULL; |
| 149 return false; | 153 return false; |
| 150 } | 154 } |
| 151 | 155 |
| 152 // We are sure that we have mapped a block of requested addresses. | 156 // We are sure that we have mapped a block of requested addresses. |
| 153 ASSERT(code_range_->size() == requested); | 157 ASSERT(code_range_->size() == requested); |
| 154 LOG(NewEvent("CodeRange", code_range_->address(), requested)); | 158 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested)); |
| 155 Address base = reinterpret_cast<Address>(code_range_->address()); | 159 Address base = reinterpret_cast<Address>(code_range_->address()); |
| 156 Address aligned_base = | 160 Address aligned_base = |
| 157 RoundUp(reinterpret_cast<Address>(code_range_->address()), | 161 RoundUp(reinterpret_cast<Address>(code_range_->address()), |
| 158 MemoryChunk::kAlignment); | 162 MemoryChunk::kAlignment); |
| 159 int size = code_range_->size() - (aligned_base - base); | 163 int size = code_range_->size() - (aligned_base - base); |
| 160 allocation_list_.Add(FreeBlock(aligned_base, size)); | 164 allocation_list_.Add(FreeBlock(aligned_base, size)); |
| 161 current_allocation_block_index_ = 0; | 165 current_allocation_block_index_ = 0; |
| 162 return true; | 166 return true; |
| 163 } | 167 } |
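
One subtlety in Setup above: the OS may return a reservation whose base address is not aligned to MemoryChunk::kAlignment, so the base is rounded up and the slack is subtracted from the usable size before the first free block is added. A standalone illustration of that arithmetic with made-up values (the real kAlignment is a V8 constant not reproduced here):

```cpp
// Illustration of the base-alignment arithmetic in CodeRange::Setup.
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kAlignment = 1 << 20;    // assume 1 MB chunk alignment
  const uintptr_t base = 0x40003000;       // unaligned address from the OS
  const size_t reserved = 16 << 20;        // 16 MB reservation

  // RoundUp(base, kAlignment): next multiple of kAlignment at or above base.
  uintptr_t aligned_base = (base + kAlignment - 1) & ~(kAlignment - 1);

  // The slack below aligned_base is never handed out; only the aligned
  // remainder goes onto the allocation list.
  size_t usable = reserved - static_cast<size_t>(aligned_base - base);

  printf("aligned base: %#lx, usable: %zu of %zu bytes\n",
         static_cast<unsigned long>(aligned_base), usable, reserved);
  return 0;
}
```
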
| 164 | 168 |
| (...skipping 92 matching lines...) | |
| 257 delete code_range_; // Frees all memory in the virtual memory range. | 261 delete code_range_; // Frees all memory in the virtual memory range. |
| 258 code_range_ = NULL; | 262 code_range_ = NULL; |
| 259 free_list_.Free(); | 263 free_list_.Free(); |
| 260 allocation_list_.Free(); | 264 allocation_list_.Free(); |
| 261 } | 265 } |
| 262 | 266 |
| 263 | 267 |
| 264 // ----------------------------------------------------------------------------- | 268 // ----------------------------------------------------------------------------- |
| 265 // MemoryAllocator | 269 // MemoryAllocator |
| 266 // | 270 // |
| 267 size_t MemoryAllocator::capacity_ = 0; | |
| 268 size_t MemoryAllocator::capacity_executable_ = 0; | |
| 269 size_t MemoryAllocator::size_ = 0; | |
| 270 size_t MemoryAllocator::size_executable_ = 0; | |
| 271 | |
| 272 List<MemoryAllocator::MemoryAllocationCallbackRegistration> | |
| 273 MemoryAllocator::memory_allocation_callbacks_; | |
| 274 | 271 |
| 275 bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) { | 272 bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) { |
| 276 capacity_ = RoundUp(capacity, Page::kPageSize); | 273 capacity_ = RoundUp(capacity, Page::kPageSize); |
| 277 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); | 274 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); |
| 278 ASSERT_GE(capacity_, capacity_executable_); | 275 ASSERT_GE(capacity_, capacity_executable_); |
| 279 | 276 |
| 280 size_ = 0; | 277 size_ = 0; |
| 281 size_executable_ = 0; | 278 size_executable_ = 0; |
| 282 | 279 |
| 283 return true; | 280 return true; |
| 284 } | 281 } |
| 285 | 282 |
| 286 | 283 |
| 287 void MemoryAllocator::TearDown() { | 284 void MemoryAllocator::TearDown() { |
| 288 // Check that spaces were torn down before MemoryAllocator. | 285 // Check that spaces were torn down before MemoryAllocator. |
| 289 ASSERT(size_ == 0); | 286 ASSERT(size_ == 0); |
| 290 ASSERT(size_executable_ == 0); | 287 ASSERT(size_executable_ == 0); |
| 291 capacity_ = 0; | 288 capacity_ = 0; |
| 292 capacity_executable_ = 0; | 289 capacity_executable_ = 0; |
| 293 } | 290 } |
| 294 | 291 |
| 295 | 292 |
| 296 void MemoryAllocator::FreeMemory(Address base, | 293 void MemoryAllocator::FreeMemory(Address base, |
| 297 size_t size, | 294 size_t size, |
| 298 Executability executable) { | 295 Executability executable) { |
| 299 if (CodeRange::contains(static_cast<Address>(base))) { | 296 // TODO(gc) make code_range part of memory allocator? |
| | 297 if (isolate_->code_range()->contains(static_cast<Address>(base))) { |
| 300 ASSERT(executable == EXECUTABLE); | 298 ASSERT(executable == EXECUTABLE); |
| 301 CodeRange::FreeRawMemory(base, size); | 299 isolate_->code_range()->FreeRawMemory(base, size); |
| 302 } else { | 300 } else { |
| 303 ASSERT(executable == NOT_EXECUTABLE || !CodeRange::exists()); | 301 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); |
| 304 VirtualMemory::ReleaseRegion(base, size); | 302 VirtualMemory::ReleaseRegion(base, size); |
| 305 } | 303 } |
| 306 | 304 |
| 307 Counters::memory_allocated.Decrement(static_cast<int>(size)); | 305 COUNTERS->memory_allocated()->Decrement(static_cast<int>(size)); |
| 308 | 306 |
| 309 ASSERT(size_ >= size); | 307 ASSERT(size_ >= size); |
| 310 size_ -= size; | 308 size_ -= size; |
| 311 | 309 |
| 312 if (executable == EXECUTABLE) { | 310 if (executable == EXECUTABLE) { |
| 313 ASSERT(size_executable_ >= size); | 311 ASSERT(size_executable_ >= size); |
| 314 size_executable_ -= size; | 312 size_executable_ -= size; |
| 315 } | 313 } |
| 316 } | 314 } |
| 317 | 315 |
| (...skipping 68 matching lines...) | |
| 386 } | 384 } |
| 387 | 385 |
| 388 | 386 |
| 389 void Page::InitializeAsAnchor(PagedSpace* owner) { | 387 void Page::InitializeAsAnchor(PagedSpace* owner) { |
| 390 set_owner(owner); | 388 set_owner(owner); |
| 391 set_prev_page(this); | 389 set_prev_page(this); |
| 392 set_next_page(this); | 390 set_next_page(this); |
| 393 } | 391 } |
| 394 | 392 |
| 395 | 393 |
| 396 MemoryChunk* MemoryChunk::Initialize(Address base, | 394 MemoryChunk* MemoryChunk::Initialize(Heap* heap, |
| | 395 Address base, |
| 397 size_t size, | 396 size_t size, |
| 398 Executability executable, | 397 Executability executable, |
| 399 Space* owner) { | 398 Space* owner) { |
| 400 MemoryChunk* chunk = FromAddress(base); | 399 MemoryChunk* chunk = FromAddress(base); |
| 401 | 400 |
| 402 ASSERT(base == chunk->address()); | 401 ASSERT(base == chunk->address()); |
| 403 | 402 |
| | 403 chunk->heap_ = heap; |
| 404 chunk->size_ = size; | 404 chunk->size_ = size; |
| 405 chunk->flags_ = 0; | 405 chunk->flags_ = 0; |
| 406 chunk->set_owner(owner); | 406 chunk->set_owner(owner); |
| 407 chunk->markbits()->Clear(); | 407 chunk->markbits()->Clear(); |
| 408 chunk->set_scan_on_scavenge(false); | 408 chunk->set_scan_on_scavenge(false); |
| 409 | 409 |
| 410 if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE); | 410 if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE); |
| 411 | 411 |
| 412 if (owner == Heap::old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA); | 412 if (owner == heap->old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA); |
| 413 | 413 |
| 414 return chunk; | 414 return chunk; |
| 415 } | 415 } |
| 416 | 416 |
| 417 | 417 |
| 418 void MemoryChunk::InsertAfter(MemoryChunk* other) { | 418 void MemoryChunk::InsertAfter(MemoryChunk* other) { |
| 419 next_chunk_ = other->next_chunk_; | 419 next_chunk_ = other->next_chunk_; |
| 420 prev_chunk_ = other; | 420 prev_chunk_ = other; |
| 421 other->next_chunk_->prev_chunk_ = this; | 421 other->next_chunk_->prev_chunk_ = this; |
| 422 other->next_chunk_ = this; | 422 other->next_chunk_ = this; |
| 423 } | 423 } |
| 424 | 424 |
| 425 | 425 |
| 426 void MemoryChunk::Unlink() { | 426 void MemoryChunk::Unlink() { |
| 427 next_chunk_->prev_chunk_ = prev_chunk_; | 427 next_chunk_->prev_chunk_ = prev_chunk_; |
| 428 prev_chunk_->next_chunk_ = next_chunk_; | 428 prev_chunk_->next_chunk_ = next_chunk_; |
| 429 prev_chunk_ = NULL; | 429 prev_chunk_ = NULL; |
| 430 next_chunk_ = NULL; | 430 next_chunk_ = NULL; |
| 431 } | 431 } |
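
InsertAfter and Unlink above need no NULL checks or special cases because every chunk list is circular with a sentinel: InitializeAsAnchor makes the anchor's prev and next point at itself, so an empty list is just the anchor alone. A self-contained model of that discipline (Node is a stand-in for MemoryChunk; a sketch, not the real class):

```cpp
// Self-contained model of the circular chunk list used by MemoryChunk.
#include <cassert>
#include <cstddef>

struct Node {
  Node* prev;
  Node* next;
  void InitAsAnchor() { prev = next = this; }  // empty list: anchor alone
  void InsertAfter(Node* other) {              // mirrors MemoryChunk::InsertAfter
    next = other->next;
    prev = other;
    other->next->prev = this;
    other->next = this;
  }
  void Unlink() {                              // mirrors MemoryChunk::Unlink
    next->prev = prev;
    prev->next = next;
    prev = next = NULL;
  }
};

int main() {
  Node anchor, a, b;
  anchor.InitAsAnchor();
  a.InsertAfter(&anchor);  // anchor <-> a
  b.InsertAfter(&a);       // anchor <-> a <-> b
  a.Unlink();              // anchor <-> b, no NULL checks needed anywhere
  assert(anchor.next == &b && b.next == &anchor);
  return 0;
}
```
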
| 432 | 432 |
| 433 | 433 |
| 434 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, | 434 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, |
| 435 Executability executable, | 435 Executability executable, |
| 436 Space* owner) { | 436 Space* owner) { |
| 437 size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size; | 437 size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size; |
| 438 Address base = NULL; | 438 Address base = NULL; |
| 439 if (executable == EXECUTABLE) { | 439 if (executable == EXECUTABLE) { |
| 440 // Check executable memory limit. | 440 // Check executable memory limit. |
| 441 if (size_executable_ + chunk_size > capacity_executable_) { | 441 if (size_executable_ + chunk_size > capacity_executable_) { |
| 442 LOG(StringEvent("MemoryAllocator::AllocateRawMemory", | 442 LOG(isolate_, |
| | 443 StringEvent("MemoryAllocator::AllocateRawMemory", |
| 443 "V8 Executable Allocation capacity exceeded")); | 444 "V8 Executable Allocation capacity exceeded")); |
| 444 return NULL; | 445 return NULL; |
| 445 } | 446 } |
| 446 | 447 |
| 447 // Allocate executable memory either from code range or from the | 448 // Allocate executable memory either from code range or from the |
| 448 // OS. | 449 // OS. |
| 449 if (CodeRange::exists()) { | 450 if (isolate_->code_range()->exists()) { |
| 450 base = CodeRange::AllocateRawMemory(chunk_size, &chunk_size); | 451 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); |
| 451 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), | 452 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), |
| 452 MemoryChunk::kAlignment)); | 453 MemoryChunk::kAlignment)); |
| 453 size_ += chunk_size; | 454 size_ += chunk_size; |
| 454 } else { | 455 } else { |
| 455 base = AllocateAlignedMemory(chunk_size, | 456 base = AllocateAlignedMemory(chunk_size, |
| 456 MemoryChunk::kAlignment, | 457 MemoryChunk::kAlignment, |
| 457 executable, | 458 executable, |
| 458 &chunk_size); | 459 &chunk_size); |
| 459 } | 460 } |
| 460 | 461 |
| 461 if (base == NULL) return NULL; | 462 if (base == NULL) return NULL; |
| 462 | 463 |
| 463 // Update executable memory size. | 464 // Update executable memory size. |
| 464 size_executable_ += chunk_size; | 465 size_executable_ += chunk_size; |
| 465 } else { | 466 } else { |
| 466 base = AllocateAlignedMemory(chunk_size, | 467 base = AllocateAlignedMemory(chunk_size, |
| 467 MemoryChunk::kAlignment, | 468 MemoryChunk::kAlignment, |
| 468 executable, | 469 executable, |
| 469 &chunk_size); | 470 &chunk_size); |
| 470 | 471 |
| 471 if (base == NULL) return NULL; | 472 if (base == NULL) return NULL; |
| 472 } | 473 } |
| 473 | 474 |
| 474 #ifdef DEBUG | 475 #ifdef DEBUG |
| 475 ZapBlock(base, chunk_size); | 476 ZapBlock(base, chunk_size); |
| 476 #endif | 477 #endif |
| 477 Counters::memory_allocated.Increment(chunk_size); | 478 COUNTERS->memory_allocated()->Increment(chunk_size); |
| 478 | 479 |
| 479 LOG(NewEvent("MemoryChunk", base, chunk_size)); | 480 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); |
| 480 if (owner != NULL) { | 481 if (owner != NULL) { |
| 481 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); | 482 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); |
| 482 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); | 483 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); |
| 483 } | 484 } |
| 484 | 485 |
| 485 return MemoryChunk::Initialize(base, chunk_size, executable, owner); | 486 return MemoryChunk::Initialize(isolate_->heap(), |
| | 487 base, |
| | 488 chunk_size, |
| | 489 executable, |
| | 490 owner); |
| 486 } | 491 } |
| 487 | 492 |
| 488 | 493 |
| 489 Page* MemoryAllocator::AllocatePage(PagedSpace* owner, | 494 Page* MemoryAllocator::AllocatePage(PagedSpace* owner, |
| 490 Executability executable) { | 495 Executability executable) { |
| 491 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner); | 496 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner); |
| 492 | 497 |
| 493 if (chunk == NULL) return NULL; | 498 if (chunk == NULL) return NULL; |
| 494 | 499 |
| 495 return Page::Initialize(chunk, executable, owner); | 500 return Page::Initialize(isolate_->heap(), chunk, executable, owner); |
| 496 } | 501 } |
| 497 | 502 |
| 498 | 503 |
| 499 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, | 504 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, |
| 500 Executability executable, | 505 Executability executable, |
| 501 Space* owner) { | 506 Space* owner) { |
| 502 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner); | 507 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner); |
| 503 if (chunk == NULL) return NULL; | 508 if (chunk == NULL) return NULL; |
| 504 return LargePage::Initialize(chunk); | 509 return LargePage::Initialize(isolate_->heap(), chunk); |
| 505 } | 510 } |
| 506 | 511 |
| 507 | 512 |
| 508 void MemoryAllocator::Free(MemoryChunk* chunk) { | 513 void MemoryAllocator::Free(MemoryChunk* chunk) { |
| 509 LOG(DeleteEvent("MemoryChunk", chunk)); | 514 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); |
| 510 if (chunk->owner() != NULL) { | 515 if (chunk->owner() != NULL) { |
| 511 ObjectSpace space = | 516 ObjectSpace space = |
| 512 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); | 517 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); |
| 513 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); | 518 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); |
| 514 } | 519 } |
| 515 | 520 |
| 516 FreeMemory(chunk->address(), | 521 FreeMemory(chunk->address(), |
| 517 chunk->size(), | 522 chunk->size(), |
| 518 chunk->executable()); | 523 chunk->executable()); |
| 519 } | 524 } |
| 520 | 525 |
| 521 | 526 |
| 522 bool MemoryAllocator::CommitBlock(Address start, | 527 bool MemoryAllocator::CommitBlock(Address start, |
| 523 size_t size, | 528 size_t size, |
| 524 Executability executable) { | 529 Executability executable) { |
| 525 if (!VirtualMemory::CommitRegion(start, size, executable)) return false; | 530 if (!VirtualMemory::CommitRegion(start, size, executable)) return false; |
| 526 #ifdef DEBUG | 531 #ifdef DEBUG |
| 527 ZapBlock(start, size); | 532 ZapBlock(start, size); |
| 528 #endif | 533 #endif |
| 529 Counters::memory_allocated.Increment(static_cast<int>(size)); | 534 COUNTERS->memory_allocated()->Increment(static_cast<int>(size)); |
| 530 return true; | 535 return true; |
| 531 } | 536 } |
| 532 | 537 |
| 533 | 538 |
| 534 bool MemoryAllocator::UncommitBlock(Address start, size_t size) { | 539 bool MemoryAllocator::UncommitBlock(Address start, size_t size) { |
| 535 if (!VirtualMemory::UncommitRegion(start, size)) return false; | 540 if (!VirtualMemory::UncommitRegion(start, size)) return false; |
| 536 Counters::memory_allocated.Decrement(static_cast<int>(size)); | 541 COUNTERS->memory_allocated()->Decrement(static_cast<int>(size)); |
| 537 return true; | 542 return true; |
| 538 } | 543 } |
| 539 | 544 |
| 540 | 545 |
| 541 void MemoryAllocator::ZapBlock(Address start, size_t size) { | 546 void MemoryAllocator::ZapBlock(Address start, size_t size) { |
| 542 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) { | 547 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) { |
| 543 Memory::Address_at(start + s) = kZapValue; | 548 Memory::Address_at(start + s) = kZapValue; |
| 544 } | 549 } |
| 545 } | 550 } |
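
ZapBlock is debug-only support: freshly allocated and freed blocks are filled with a recognizable poison word, so a stale pointer dereference fails immediately and is obvious in a crash dump. A toy equivalent (the poison constant here is an arbitrary stand-in, not V8's kZapValue):

```cpp
// Toy version of ZapBlock: word-by-word poison fill for debug builds.
#include <cstdint>
#include <cstring>

static const uintptr_t kZapValue = 0xdeadbeef;  // stand-in, not V8's constant

// Fill [start, start + size) one word at a time with the poison value.
void ZapBlock(char* start, size_t size) {
  for (size_t s = 0; s + sizeof(uintptr_t) <= size; s += sizeof(uintptr_t)) {
    memcpy(start + s, &kZapValue, sizeof(uintptr_t));
  }
}

int main() {
  char buffer[64];
  ZapBlock(buffer, sizeof(buffer));  // buffer now reads back as the zap word
  return 0;
}
```
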
| 546 | 551 |
| (...skipping 50 matching lines...) | |
| 597 PrintF(" capacity: %" V8_PTR_PREFIX "d" | 602 PrintF(" capacity: %" V8_PTR_PREFIX "d" |
| 598 ", used: %" V8_PTR_PREFIX "d" | 603 ", used: %" V8_PTR_PREFIX "d" |
| 599 ", available: %%%d\n\n", | 604 ", available: %%%d\n\n", |
| 600 capacity_, size_, static_cast<int>(pct*100)); | 605 capacity_, size_, static_cast<int>(pct*100)); |
| 601 } | 606 } |
| 602 #endif | 607 #endif |
| 603 | 608 |
| 604 // ----------------------------------------------------------------------------- | 609 // ----------------------------------------------------------------------------- |
| 605 // PagedSpace implementation | 610 // PagedSpace implementation |
| 606 | 611 |
| 607 PagedSpace::PagedSpace(intptr_t max_capacity, | 612 PagedSpace::PagedSpace(Heap* heap, |
| | 613 intptr_t max_capacity, |
| 608 AllocationSpace id, | 614 AllocationSpace id, |
| 609 Executability executable) | 615 Executability executable) |
| 610 : Space(id, executable), | 616 : Space(heap, id, executable), |
| 611 free_list_(this), | 617 free_list_(this), |
| 612 was_swept_conservatively_(false) { | 618 was_swept_conservatively_(false) { |
| 613 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) | 619 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) |
| 614 * Page::kObjectAreaSize; | 620 * Page::kObjectAreaSize; |
| 615 accounting_stats_.Clear(); | 621 accounting_stats_.Clear(); |
| 616 | 622 |
| 617 allocation_info_.top = NULL; | 623 allocation_info_.top = NULL; |
| 618 allocation_info_.limit = NULL; | 624 allocation_info_.limit = NULL; |
| 619 | 625 |
| 620 anchor_.InitializeAsAnchor(this); | 626 anchor_.InitializeAsAnchor(this); |
| 621 } | 627 } |
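
The max_capacity_ computation in the constructor above measures capacity in usable object-area bytes, not raw page bytes: the requested byte budget is rounded down to whole pages, and each page contributes only its object area (the page minus its header). A worked example with made-up constants (the real values live in spaces.h):

```cpp
// Worked example of PagedSpace's max_capacity_ arithmetic.
#include <cstdio>

int main() {
  const long kPageSize = 8192;                   // assumed page size
  const long kObjectAreaSize = kPageSize - 256;  // assumed: page minus header
  const long max_capacity = 100000;              // requested budget in bytes

  // RoundDown(max_capacity, kPageSize) / kPageSize == whole pages affordable.
  long pages = max_capacity / kPageSize;         // 12 pages
  long max_capacity_ = pages * kObjectAreaSize;  // 12 * 7936 = 95232 bytes

  printf("%ld pages, %ld usable object-area bytes\n", pages, max_capacity_);
  return 0;
}
```
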
| 622 | 628 |
| 623 | 629 |
| 624 bool PagedSpace::Setup() { | 630 bool PagedSpace::Setup() { |
| 625 return true; | 631 return true; |
| 626 } | 632 } |
| 627 | 633 |
| 628 | 634 |
| 629 bool PagedSpace::HasBeenSetup() { | 635 bool PagedSpace::HasBeenSetup() { |
| 630 return true; | 636 return true; |
| 631 } | 637 } |
| 632 | 638 |
| 633 | 639 |
| 634 void PagedSpace::TearDown() { | 640 void PagedSpace::TearDown() { |
| 635 PageIterator iterator(this); | 641 PageIterator iterator(this); |
| 636 while (iterator.has_next()) { | 642 while (iterator.has_next()) { |
| 637 MemoryAllocator::Free(iterator.next()); | 643 heap()->isolate()->memory_allocator()->Free(iterator.next()); |
| 638 } | 644 } |
| 639 anchor_.set_next_page(&anchor_); | 645 anchor_.set_next_page(&anchor_); |
| 640 anchor_.set_prev_page(&anchor_); | 646 anchor_.set_prev_page(&anchor_); |
| 641 accounting_stats_.Clear(); | 647 accounting_stats_.Clear(); |
| 642 } | 648 } |
| 643 | 649 |
| 644 | 650 |
| 645 #ifdef ENABLE_HEAP_PROTECTION | 651 #ifdef ENABLE_HEAP_PROTECTION |
| 646 | 652 |
| 647 void PagedSpace::Protect() { | 653 void PagedSpace::Protect() { |
| 648 Page* page = first_page_; | 654 Page* page = first_page_; |
| 649 while (page->is_valid()) { | 655 while (page->is_valid()) { |
| 650 MemoryAllocator::ProtectChunkFromPage(page); | 656 Isolate::Current()->memory_allocator()->ProtectChunkFromPage(page); |
| 651 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page(); | 657 page = Isolate::Current()->memory_allocator()-> |
| | 658 FindLastPageInSameChunk(page)->next_page(); |
| 652 } | 659 } |
| 653 } | 660 } |
| 654 | 661 |
| 655 | 662 |
| 656 void PagedSpace::Unprotect() { | 663 void PagedSpace::Unprotect() { |
| 657 Page* page = first_page_; | 664 Page* page = first_page_; |
| 658 while (page->is_valid()) { | 665 while (page->is_valid()) { |
| 659 MemoryAllocator::UnprotectChunkFromPage(page); | 666 Isolate::Current()->memory_allocator()->UnprotectChunkFromPage(page); |
| 660 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page(); | 667 page = Isolate::Current()->memory_allocator()-> |
| | 668 FindLastPageInSameChunk(page)->next_page(); |
| 661 } | 669 } |
| 662 } | 670 } |
| 663 | 671 |
| 664 #endif | 672 #endif |
| 665 | 673 |
| 666 | 674 |
| 667 MaybeObject* PagedSpace::FindObject(Address addr) { | 675 MaybeObject* PagedSpace::FindObject(Address addr) { |
| 668 // Note: this function can only be called on precisely swept spaces. | 676 // Note: this function can only be called on precisely swept spaces. |
| 669 ASSERT(!MarkCompactCollector::in_use()); | 677 ASSERT(!heap()->mark_compact_collector()->in_use()); |
| 670 | 678 |
| 671 if (!Contains(addr)) return Failure::Exception(); | 679 if (!Contains(addr)) return Failure::Exception(); |
| 672 | 680 |
| 673 Page* p = Page::FromAddress(addr); | 681 Page* p = Page::FromAddress(addr); |
| 674 HeapObjectIterator it(p, NULL); | 682 HeapObjectIterator it(p, NULL); |
| 675 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | 683 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 676 Address cur = obj->address(); | 684 Address cur = obj->address(); |
| 677 Address next = cur + obj->Size(); | 685 Address next = cur + obj->Size(); |
| 678 if ((cur <= addr) && (addr < next)) return obj; | 686 if ((cur <= addr) && (addr < next)) return obj; |
| 679 } | 687 } |
| (...skipping 15 matching lines...) | |
| 695 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); | 703 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); |
| 696 ASSERT(Capacity() % Page::kObjectAreaSize == 0); | 704 ASSERT(Capacity() % Page::kObjectAreaSize == 0); |
| 697 | 705 |
| 698 if (Capacity() == max_capacity_) return false; | 706 if (Capacity() == max_capacity_) return false; |
| 699 | 707 |
| 700 ASSERT(Capacity() < max_capacity_); | 708 ASSERT(Capacity() < max_capacity_); |
| 701 | 709 |
| 702 // Are we going to exceed capacity for this space? | 710 // Are we going to exceed capacity for this space? |
| 703 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; | 711 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; |
| 704 | 712 |
| 705 Page* p = MemoryAllocator::AllocatePage(this, executable()); | 713 Page* p = heap()->isolate()->memory_allocator()-> |
| | 714 AllocatePage(this, executable()); |
| 706 if (p == NULL) return false; | 715 if (p == NULL) return false; |
| 707 | 716 |
| 708 ASSERT(Capacity() <= max_capacity_); | 717 ASSERT(Capacity() <= max_capacity_); |
| 709 | 718 |
| 710 p->InsertAfter(anchor_.prev_page()); | 719 p->InsertAfter(anchor_.prev_page()); |
| 711 | 720 |
| 712 return true; | 721 return true; |
| 713 } | 722 } |
| 714 | 723 |
| 715 | 724 |
| (...skipping 47 matching lines...) | |
| 763 HeapObjectIterator it(page, NULL); | 772 HeapObjectIterator it(page, NULL); |
| 764 Address end_of_previous_object = page->ObjectAreaStart(); | 773 Address end_of_previous_object = page->ObjectAreaStart(); |
| 765 Address top = page->ObjectAreaEnd(); | 774 Address top = page->ObjectAreaEnd(); |
| 766 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { | 775 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { |
| 767 ASSERT(end_of_previous_object <= object->address()); | 776 ASSERT(end_of_previous_object <= object->address()); |
| 768 | 777 |
| 769 // The first word should be a map, and we expect all map pointers to | 778 // The first word should be a map, and we expect all map pointers to |
| 770 // be in map space. | 779 // be in map space. |
| 771 Map* map = object->map(); | 780 Map* map = object->map(); |
| 772 ASSERT(map->IsMap()); | 781 ASSERT(map->IsMap()); |
| 773 ASSERT(Heap::map_space()->Contains(map)); | 782 ASSERT(heap()->map_space()->Contains(map)); |
| 774 | 783 |
| 775 // Perform space-specific object verification. | 784 // Perform space-specific object verification. |
| 776 VerifyObject(object); | 785 VerifyObject(object); |
| 777 | 786 |
| 778 // The object itself should look OK. | 787 // The object itself should look OK. |
| 779 object->Verify(); | 788 object->Verify(); |
| 780 | 789 |
| 781 // All the interior pointers should be contained in the heap. | 790 // All the interior pointers should be contained in the heap. |
| 782 int size = object->Size(); | 791 int size = object->Size(); |
| 783 object->IterateBody(map->instance_type(), size, visitor); | 792 object->IterateBody(map->instance_type(), size, visitor); |
| 784 | 793 |
| 785 ASSERT(object->address() + size <= top); | 794 ASSERT(object->address() + size <= top); |
| 786 end_of_previous_object = object->address() + size; | 795 end_of_previous_object = object->address() + size; |
| 787 } | 796 } |
| 788 } | 797 } |
| 789 } | 798 } |
| 790 #endif | 799 #endif |
| 791 | 800 |
| 792 | 801 |
| 793 // ----------------------------------------------------------------------------- | 802 // ----------------------------------------------------------------------------- |
| 794 // NewSpace implementation | 803 // NewSpace implementation |
| 795 | 804 |
| 796 | 805 |
| 797 bool NewSpace::Setup(int maximum_semispace_capacity) { | 806 bool NewSpace::Setup(int maximum_semispace_capacity) { |
| 798 // Setup new space based on the preallocated memory block defined by | 807 // Setup new space based on the preallocated memory block defined by |
| 799 // start and size. The provided space is divided into two semi-spaces. | 808 // start and size. The provided space is divided into two semi-spaces. |
| 800 // To support fast containment testing in the new space, the size of | 809 // To support fast containment testing in the new space, the size of |
| 801 // this chunk must be a power of two and it must be aligned to its size. | 810 // this chunk must be a power of two and it must be aligned to its size. |
| 802 int initial_semispace_capacity = Heap::InitialSemiSpaceSize(); | 811 int initial_semispace_capacity = heap()->InitialSemiSpaceSize(); |
| 803 | 812 |
| 804 size_t size = 0; | 813 size_t size = 0; |
| 805 Address base = | 814 Address base = |
| 806 MemoryAllocator::ReserveAlignedMemory(2 * maximum_semispace_capacity, | 815 heap()->isolate()->memory_allocator()->ReserveAlignedMemory( |
| 807 2 * maximum_semispace_capacity, | 816 2 * maximum_semispace_capacity, |
| 808 &size); | 817 2 * maximum_semispace_capacity, |
| | 818 &size); |
| 809 | 819 |
| 810 if (base == NULL) return false; | 820 if (base == NULL) return false; |
| 811 | 821 |
| 812 chunk_base_ = base; | 822 chunk_base_ = base; |
| 813 chunk_size_ = static_cast<uintptr_t>(size); | 823 chunk_size_ = static_cast<uintptr_t>(size); |
| 814 LOG(NewEvent("InitialChunk", chunk_base_, chunk_size_)); | 824 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_)); |
| 815 | 825 |
| 816 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity); | 826 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity); |
| 817 ASSERT(IsPowerOf2(maximum_semispace_capacity)); | 827 ASSERT(IsPowerOf2(maximum_semispace_capacity)); |
| 818 | 828 |
| 819 // Allocate and setup the histogram arrays if necessary. | 829 // Allocate and setup the histogram arrays if necessary. |
| 820 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 830 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
| 821 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); | 831 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); |
| 822 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); | 832 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); |
| 823 | 833 |
| 824 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \ | 834 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \ |
| 825 promoted_histogram_[name].set_name(#name); | 835 promoted_histogram_[name].set_name(#name); |
| 826 INSTANCE_TYPE_LIST(SET_NAME) | 836 INSTANCE_TYPE_LIST(SET_NAME) |
| 827 #undef SET_NAME | 837 #undef SET_NAME |
| 828 #endif | 838 #endif |
| 829 | 839 |
| 830 ASSERT(maximum_semispace_capacity == Heap::ReservedSemiSpaceSize()); | 840 ASSERT(maximum_semispace_capacity == heap()->ReservedSemiSpaceSize()); |
| 831 ASSERT(static_cast<intptr_t>(chunk_size_) >= | 841 ASSERT(static_cast<intptr_t>(chunk_size_) >= |
| 832 2 * Heap::ReservedSemiSpaceSize()); | 842 2 * heap()->ReservedSemiSpaceSize()); |
| 833 ASSERT(IsAddressAligned(chunk_base_, 2 * maximum_semispace_capacity, 0)); | 843 ASSERT(IsAddressAligned(chunk_base_, 2 * maximum_semispace_capacity, 0)); |
| 834 | 844 |
| 835 if (!to_space_.Setup(chunk_base_, | 845 if (!to_space_.Setup(chunk_base_, |
| 836 initial_semispace_capacity, | 846 initial_semispace_capacity, |
| 837 maximum_semispace_capacity)) { | 847 maximum_semispace_capacity)) { |
| 838 return false; | 848 return false; |
| 839 } | 849 } |
| 840 if (!from_space_.Setup(chunk_base_ + maximum_semispace_capacity, | 850 if (!from_space_.Setup(chunk_base_ + maximum_semispace_capacity, |
| 841 initial_semispace_capacity, | 851 initial_semispace_capacity, |
| 842 maximum_semispace_capacity)) { | 852 maximum_semispace_capacity)) { |
| (...skipping 23 matching lines...) | |
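
The comment at the top of NewSpace::Setup above explains why the semispace pair must be a power of two in size and aligned to its size: containment testing ("is this address in new space?") then reduces to a single mask-and-compare instead of two range checks. A sketch of that test with made-up values:

```cpp
// Containment test enabled by the power-of-two, self-aligned reservation.
#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kSize = 1 << 22;          // assume a 4 MB new-space chunk
  const uintptr_t chunk_base = 64 * kSize;  // aligned to its own size
  const uintptr_t mask = ~(kSize - 1);      // keeps only the high bits

  uintptr_t inside = chunk_base + 12345;
  uintptr_t outside = chunk_base + kSize + 1;

  // One AND plus one compare, instead of (addr >= base && addr < base + size).
  assert((inside & mask) == chunk_base);
  assert((outside & mask) != chunk_base);
  return 0;
}
```
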
| 866 } | 876 } |
| 867 #endif | 877 #endif |
| 868 | 878 |
| 869 start_ = NULL; | 879 start_ = NULL; |
| 870 allocation_info_.top = NULL; | 880 allocation_info_.top = NULL; |
| 871 allocation_info_.limit = NULL; | 881 allocation_info_.limit = NULL; |
| 872 | 882 |
| 873 to_space_.TearDown(); | 883 to_space_.TearDown(); |
| 874 from_space_.TearDown(); | 884 from_space_.TearDown(); |
| 875 | 885 |
| 876 LOG(DeleteEvent("InitialChunk", chunk_base_)); | 886 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_)); |
| 877 MemoryAllocator::FreeMemory(chunk_base_, | 887 heap()->isolate()->memory_allocator()->FreeMemory( |
| 878 static_cast<size_t>(chunk_size_), | 888 chunk_base_, |
| 879 NOT_EXECUTABLE); | 889 static_cast<size_t>(chunk_size_), |
| | 890 NOT_EXECUTABLE); |
| 880 chunk_base_ = NULL; | 891 chunk_base_ = NULL; |
| 881 chunk_size_ = 0; | 892 chunk_size_ = 0; |
| 882 } | 893 } |
| 883 | 894 |
| 884 | 895 |
| 885 #ifdef ENABLE_HEAP_PROTECTION | 896 #ifdef ENABLE_HEAP_PROTECTION |
| 886 | 897 |
| 887 void NewSpace::Protect() { | 898 void NewSpace::Protect() { |
| 888 MemoryAllocator::Protect(ToSpaceLow(), Capacity()); | 899 heap()->isolate()->memory_allocator()->Protect(ToSpaceLow(), Capacity()); |
| 889 MemoryAllocator::Protect(FromSpaceLow(), Capacity()); | 900 heap()->isolate()->memory_allocator()->Protect(FromSpaceLow(), Capacity()); |
| 890 } | 901 } |
| 891 | 902 |
| 892 | 903 |
| 893 void NewSpace::Unprotect() { | 904 void NewSpace::Unprotect() { |
| 894 MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(), | 905 heap()->isolate()->memory_allocator()->Unprotect(ToSpaceLow(), Capacity(), |
| 895 to_space_.executable()); | 906 to_space_.executable()); |
| 896 MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(), | 907 heap()->isolate()->memory_allocator()->Unprotect(FromSpaceLow(), Capacity(), |
| 897 from_space_.executable()); | 908 from_space_.executable()); |
| 898 } | 909 } |
| 899 | 910 |
| 900 #endif | 911 #endif |
| 901 | 912 |
| 902 | 913 |
| 903 void NewSpace::Flip() { | 914 void NewSpace::Flip() { |
| 904 SemiSpace tmp = from_space_; | 915 SemiSpace tmp = from_space_; |
| 905 from_space_ = to_space_; | 916 from_space_ = to_space_; |
| 906 to_space_ = tmp; | 917 to_space_ = tmp; |
| 907 } | 918 } |
| (...skipping 57 matching lines...) | |
| 965 // There should be objects packed in from the low address up to the | 976 // There should be objects packed in from the low address up to the |
| 966 // allocation pointer. | 977 // allocation pointer. |
| 967 Address current = to_space_.low(); | 978 Address current = to_space_.low(); |
| 968 while (current < top()) { | 979 while (current < top()) { |
| 969 HeapObject* object = HeapObject::FromAddress(current); | 980 HeapObject* object = HeapObject::FromAddress(current); |
| 970 | 981 |
| 971 // The first word should be a map, and we expect all map pointers to | 982 // The first word should be a map, and we expect all map pointers to |
| 972 // be in map space. | 983 // be in map space. |
| 973 Map* map = object->map(); | 984 Map* map = object->map(); |
| 974 ASSERT(map->IsMap()); | 985 ASSERT(map->IsMap()); |
| 975 ASSERT(Heap::map_space()->Contains(map)); | 986 ASSERT(heap()->map_space()->Contains(map)); |
| 976 | 987 |
| 977 // The object should not be code or a map. | 988 // The object should not be code or a map. |
| 978 ASSERT(!object->IsMap()); | 989 ASSERT(!object->IsMap()); |
| 979 ASSERT(!object->IsCode()); | 990 ASSERT(!object->IsCode()); |
| 980 | 991 |
| 981 // The object itself should look OK. | 992 // The object itself should look OK. |
| 982 object->Verify(); | 993 object->Verify(); |
| 983 | 994 |
| 984 // All the interior pointers should be contained in the heap. | 995 // All the interior pointers should be contained in the heap. |
| 985 VerifyPointersVisitor visitor; | 996 VerifyPointersVisitor visitor; |
| 986 int size = object->Size(); | 997 int size = object->Size(); |
| 987 object->IterateBody(map->instance_type(), size, &visitor); | 998 object->IterateBody(map->instance_type(), size, &visitor); |
| 988 | 999 |
| 989 current += size; | 1000 current += size; |
| 990 } | 1001 } |
| 991 | 1002 |
| 992 // The allocation pointer should not be in the middle of an object. | 1003 // The allocation pointer should not be in the middle of an object. |
| 993 ASSERT(current == top()); | 1004 ASSERT(current == top()); |
| 994 } | 1005 } |
| 995 #endif | 1006 #endif |
| 996 | 1007 |
| 997 | 1008 |
| 998 bool SemiSpace::Commit() { | 1009 bool SemiSpace::Commit() { |
| 999 ASSERT(!is_committed()); | 1010 ASSERT(!is_committed()); |
| 1000 if (!MemoryAllocator::CommitBlock(start_, capacity_, executable())) { | 1011 if (!heap()->isolate()->memory_allocator()->CommitBlock( |
| | 1012 start_, capacity_, executable())) { |
| 1001 return false; | 1013 return false; |
| 1002 } | 1014 } |
| 1003 committed_ = true; | 1015 committed_ = true; |
| 1004 return true; | 1016 return true; |
| 1005 } | 1017 } |
| 1006 | 1018 |
| 1007 | 1019 |
| 1008 bool SemiSpace::Uncommit() { | 1020 bool SemiSpace::Uncommit() { |
| 1009 ASSERT(is_committed()); | 1021 ASSERT(is_committed()); |
| 1010 if (!MemoryAllocator::UncommitBlock(start_, capacity_)) { | 1022 if (!heap()->isolate()->memory_allocator()->UncommitBlock( |
| | 1023 start_, capacity_)) { |
| 1011 return false; | 1024 return false; |
| 1012 } | 1025 } |
| 1013 committed_ = false; | 1026 committed_ = false; |
| 1014 return true; | 1027 return true; |
| 1015 } | 1028 } |
| 1016 | 1029 |
| 1017 | 1030 |
| 1018 // ----------------------------------------------------------------------------- | 1031 // ----------------------------------------------------------------------------- |
| 1019 // SemiSpace implementation | 1032 // SemiSpace implementation |
| 1020 | 1033 |
| (...skipping 25 matching lines...) | |
| 1046 start_ = NULL; | 1059 start_ = NULL; |
| 1047 capacity_ = 0; | 1060 capacity_ = 0; |
| 1048 } | 1061 } |
| 1049 | 1062 |
| 1050 | 1063 |
| 1051 bool SemiSpace::Grow() { | 1064 bool SemiSpace::Grow() { |
| 1052 // Double the semispace size but only up to maximum capacity. | 1065 // Double the semispace size but only up to maximum capacity. |
| 1053 int maximum_extra = maximum_capacity_ - capacity_; | 1066 int maximum_extra = maximum_capacity_ - capacity_; |
| 1054 int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())), | 1067 int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())), |
| 1055 maximum_extra); | 1068 maximum_extra); |
| 1056 if (!MemoryAllocator::CommitBlock(high(), extra, executable())) { | 1069 if (!heap()->isolate()->memory_allocator()->CommitBlock( |
| | 1070 high(), extra, executable())) { |
| 1057 return false; | 1071 return false; |
| 1058 } | 1072 } |
| 1059 capacity_ += extra; | 1073 capacity_ += extra; |
| 1060 return true; | 1074 return true; |
| 1061 } | 1075 } |
| 1062 | 1076 |
| 1063 | 1077 |
| 1064 bool SemiSpace::GrowTo(int new_capacity) { | 1078 bool SemiSpace::GrowTo(int new_capacity) { |
| 1065 ASSERT(new_capacity <= maximum_capacity_); | 1079 ASSERT(new_capacity <= maximum_capacity_); |
| 1066 ASSERT(new_capacity > capacity_); | 1080 ASSERT(new_capacity > capacity_); |
| 1067 size_t delta = new_capacity - capacity_; | 1081 size_t delta = new_capacity - capacity_; |
| 1068 ASSERT(IsAligned(delta, OS::AllocateAlignment())); | 1082 ASSERT(IsAligned(delta, OS::AllocateAlignment())); |
| 1069 if (!MemoryAllocator::CommitBlock(high(), delta, executable())) { | 1083 if (!heap()->isolate()->memory_allocator()->CommitBlock( |
| | 1084 high(), delta, executable())) { |
| 1070 return false; | 1085 return false; |
| 1071 } | 1086 } |
| 1072 capacity_ = new_capacity; | 1087 capacity_ = new_capacity; |
| 1073 return true; | 1088 return true; |
| 1074 } | 1089 } |
| 1075 | 1090 |
| 1076 | 1091 |
| 1077 bool SemiSpace::ShrinkTo(int new_capacity) { | 1092 bool SemiSpace::ShrinkTo(int new_capacity) { |
| 1078 ASSERT(new_capacity >= initial_capacity_); | 1093 ASSERT(new_capacity >= initial_capacity_); |
| 1079 ASSERT(new_capacity < capacity_); | 1094 ASSERT(new_capacity < capacity_); |
| 1080 size_t delta = capacity_ - new_capacity; | 1095 size_t delta = capacity_ - new_capacity; |
| 1081 ASSERT(IsAligned(delta, OS::AllocateAlignment())); | 1096 ASSERT(IsAligned(delta, OS::AllocateAlignment())); |
| 1082 if (!MemoryAllocator::UncommitBlock(high() - delta, delta)) { | 1097 if (!heap()->isolate()->memory_allocator()->UncommitBlock( |
| | 1098 high() - delta, delta)) { |
| 1083 return false; | 1099 return false; |
| 1084 } | 1100 } |
| 1085 capacity_ = new_capacity; | 1101 capacity_ = new_capacity; |
| 1086 return true; | 1102 return true; |
| 1087 } | 1103 } |
| 1088 | 1104 |
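
Grow, GrowTo, and ShrinkTo above all follow the same reserve-then-commit discipline: the full semispace address range is reserved once at startup, and capacity changes only commit or uncommit a delta at the high end. A rough POSIX model of that idea, where mmap/mprotect stand in for V8's VirtualMemory (PROT_NONE only approximates uncommit; this is not the actual implementation):

```cpp
// Rough POSIX model of the reserve-then-commit semispace discipline.
#include <sys/mman.h>
#include <cassert>
#include <cstddef>

int main() {
  const size_t kMaximumCapacity = 1 << 20;  // whole semispace, reserved once

  // Reserve address space only; no readable/writable backing yet.
  void* mem = mmap(NULL, kMaximumCapacity, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(mem != MAP_FAILED);
  char* base = static_cast<char*>(mem);

  size_t capacity = 1 << 16;  // initial committed capacity
  assert(mprotect(base, capacity, PROT_READ | PROT_WRITE) == 0);  // Commit()

  // GrowTo(new_capacity): commit only the delta at the high end.
  size_t new_capacity = 1 << 18;
  assert(mprotect(base + capacity, new_capacity - capacity,
                  PROT_READ | PROT_WRITE) == 0);
  capacity = new_capacity;

  // ShrinkTo(new_capacity): give the tail back.
  size_t shrunk = 1 << 17;
  assert(mprotect(base + shrunk, capacity - shrunk, PROT_NONE) == 0);
  capacity = shrunk;

  munmap(base, kMaximumCapacity);
  return 0;
}
```
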
| 1089 | 1105 |
| 1090 #ifdef DEBUG | 1106 #ifdef DEBUG |
| 1091 void SemiSpace::Print() { } | 1107 void SemiSpace::Print() { } |
| 1092 | 1108 |
| (...skipping 27 matching lines...) | |
| 1120 ASSERT(space->ToSpaceLow() <= end | 1136 ASSERT(space->ToSpaceLow() <= end |
| 1121 && end <= space->ToSpaceHigh()); | 1137 && end <= space->ToSpaceHigh()); |
| 1122 space_ = &space->to_space_; | 1138 space_ = &space->to_space_; |
| 1123 current_ = start; | 1139 current_ = start; |
| 1124 limit_ = end; | 1140 limit_ = end; |
| 1125 size_func_ = size_func; | 1141 size_func_ = size_func; |
| 1126 } | 1142 } |
| 1127 | 1143 |
| 1128 | 1144 |
| 1129 #ifdef DEBUG | 1145 #ifdef DEBUG |
| 1130 // A static array of histogram info for each type. | |
| 1131 static HistogramInfo heap_histograms[LAST_TYPE+1]; | |
| 1132 static JSObject::SpillInformation js_spill_information; | |
| 1133 | |
| 1134 // heap_histograms is shared, always clear it before using it. | 1146 // heap_histograms is shared, always clear it before using it. |
| 1135 static void ClearHistograms() { | 1147 static void ClearHistograms() { |
| | 1148 Isolate* isolate = Isolate::Current(); |
| 1136 // We reset the name each time, though it hasn't changed. | 1149 // We reset the name each time, though it hasn't changed. |
| 1137 #define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name); | 1150 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name); |
| 1138 INSTANCE_TYPE_LIST(DEF_TYPE_NAME) | 1151 INSTANCE_TYPE_LIST(DEF_TYPE_NAME) |
| 1139 #undef DEF_TYPE_NAME | 1152 #undef DEF_TYPE_NAME |
| 1140 | 1153 |
| 1141 #define CLEAR_HISTOGRAM(name) heap_histograms[name].clear(); | 1154 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear(); |
| 1142 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM) | 1155 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM) |
| 1143 #undef CLEAR_HISTOGRAM | 1156 #undef CLEAR_HISTOGRAM |
| 1144 | 1157 |
| 1145 js_spill_information.Clear(); | 1158 isolate->js_spill_information()->Clear(); |
| 1146 } | 1159 } |
| 1147 | 1160 |
| 1148 | 1161 |
| 1149 static int code_kind_statistics[Code::NUMBER_OF_KINDS]; | |
| 1150 | |
| 1151 | |
| 1152 static void ClearCodeKindStatistics() { | 1162 static void ClearCodeKindStatistics() { |
| | 1163 Isolate* isolate = Isolate::Current(); |
| 1153 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { | 1164 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { |
| 1154 code_kind_statistics[i] = 0; | 1165 isolate->code_kind_statistics()[i] = 0; |
| 1155 } | 1166 } |
| 1156 } | 1167 } |
| 1157 | 1168 |
| 1158 | 1169 |
| 1159 static void ReportCodeKindStatistics() { | 1170 static void ReportCodeKindStatistics() { |
| | 1171 Isolate* isolate = Isolate::Current(); |
| 1160 const char* table[Code::NUMBER_OF_KINDS] = { NULL }; | 1172 const char* table[Code::NUMBER_OF_KINDS] = { NULL }; |
| 1161 | 1173 |
| 1162 #define CASE(name) \ | 1174 #define CASE(name) \ |
| 1163 case Code::name: table[Code::name] = #name; \ | 1175 case Code::name: table[Code::name] = #name; \ |
| 1164 break | 1176 break |
| 1165 | 1177 |
| 1166 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { | 1178 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { |
| 1167 switch (static_cast<Code::Kind>(i)) { | 1179 switch (static_cast<Code::Kind>(i)) { |
| 1168 CASE(FUNCTION); | 1180 CASE(FUNCTION); |
| 1169 CASE(OPTIMIZED_FUNCTION); | 1181 CASE(OPTIMIZED_FUNCTION); |
| (...skipping 10 matching lines...) Expand all Loading... | |
| 1180 CASE(BINARY_OP_IC); | 1192 CASE(BINARY_OP_IC); |
| 1181 CASE(TYPE_RECORDING_BINARY_OP_IC); | 1193 CASE(TYPE_RECORDING_BINARY_OP_IC); |
| 1182 CASE(COMPARE_IC); | 1194 CASE(COMPARE_IC); |
| 1183 } | 1195 } |
| 1184 } | 1196 } |
| 1185 | 1197 |
| 1186 #undef CASE | 1198 #undef CASE |
| 1187 | 1199 |
| 1188 PrintF("\n Code kind histograms: \n"); | 1200 PrintF("\n Code kind histograms: \n"); |
| 1189 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { | 1201 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { |
| 1190 if (code_kind_statistics[i] > 0) { | 1202 if (isolate->code_kind_statistics()[i] > 0) { |
| 1191 PrintF(" %-20s: %10d bytes\n", table[i], code_kind_statistics[i]); | 1203 PrintF(" %-20s: %10d bytes\n", table[i], |
| | 1204 isolate->code_kind_statistics()[i]); |
| 1192 } | 1205 } |
| 1193 } | 1206 } |
| 1194 PrintF("\n"); | 1207 PrintF("\n"); |
| 1195 } | 1208 } |
| 1196 | 1209 |
| 1197 | 1210 |
| 1198 static int CollectHistogramInfo(HeapObject* obj) { | 1211 static int CollectHistogramInfo(HeapObject* obj) { |
| | 1212 Isolate* isolate = Isolate::Current(); |
| 1199 InstanceType type = obj->map()->instance_type(); | 1213 InstanceType type = obj->map()->instance_type(); |
| 1200 ASSERT(0 <= type && type <= LAST_TYPE); | 1214 ASSERT(0 <= type && type <= LAST_TYPE); |
| 1201 ASSERT(heap_histograms[type].name() != NULL); | 1215 ASSERT(isolate->heap_histograms()[type].name() != NULL); |
| 1202 heap_histograms[type].increment_number(1); | 1216 isolate->heap_histograms()[type].increment_number(1); |
| 1203 heap_histograms[type].increment_bytes(obj->Size()); | 1217 isolate->heap_histograms()[type].increment_bytes(obj->Size()); |
| 1204 | 1218 |
| 1205 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) { | 1219 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) { |
| 1206 JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information); | 1220 JSObject::cast(obj)->IncrementSpillStatistics( |
| | 1221 isolate->js_spill_information()); |
| 1207 } | 1222 } |
| 1208 | 1223 |
| 1209 return obj->Size(); | 1224 return obj->Size(); |
| 1210 } | 1225 } |
| 1211 | 1226 |
| 1212 | 1227 |
| 1213 static void ReportHistogram(bool print_spill) { | 1228 static void ReportHistogram(bool print_spill) { |
| | 1229 Isolate* isolate = Isolate::Current(); |
| 1214 PrintF("\n Object Histogram:\n"); | 1230 PrintF("\n Object Histogram:\n"); |
| 1215 for (int i = 0; i <= LAST_TYPE; i++) { | 1231 for (int i = 0; i <= LAST_TYPE; i++) { |
| 1216 if (heap_histograms[i].number() > 0) { | 1232 if (isolate->heap_histograms()[i].number() > 0) { |
| 1217 PrintF(" %-34s%10d (%10d bytes)\n", | 1233 PrintF(" %-34s%10d (%10d bytes)\n", |
| 1218 heap_histograms[i].name(), | 1234 isolate->heap_histograms()[i].name(), |
| 1219 heap_histograms[i].number(), | 1235 isolate->heap_histograms()[i].number(), |
| 1220 heap_histograms[i].bytes()); | 1236 isolate->heap_histograms()[i].bytes()); |
| 1221 } | 1237 } |
| 1222 } | 1238 } |
| 1223 PrintF("\n"); | 1239 PrintF("\n"); |
| 1224 | 1240 |
| 1225 // Summarize string types. | 1241 // Summarize string types. |
| 1226 int string_number = 0; | 1242 int string_number = 0; |
| 1227 int string_bytes = 0; | 1243 int string_bytes = 0; |
| 1228 #define INCREMENT(type, size, name, camel_name) \ | 1244 #define INCREMENT(type, size, name, camel_name) \ |
| 1229 string_number += heap_histograms[type].number(); \ | 1245 string_number += isolate->heap_histograms()[type].number(); \ |
| 1230 string_bytes += heap_histograms[type].bytes(); | 1246 string_bytes += isolate->heap_histograms()[type].bytes(); |
| 1231 STRING_TYPE_LIST(INCREMENT) | 1247 STRING_TYPE_LIST(INCREMENT) |
| 1232 #undef INCREMENT | 1248 #undef INCREMENT |
| 1233 if (string_number > 0) { | 1249 if (string_number > 0) { |
| 1234 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number, | 1250 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number, |
| 1235 string_bytes); | 1251 string_bytes); |
| 1236 } | 1252 } |
| 1237 | 1253 |
| 1238 if (FLAG_collect_heap_spill_statistics && print_spill) { | 1254 if (FLAG_collect_heap_spill_statistics && print_spill) { |
| 1239 js_spill_information.Print(); | 1255 isolate->js_spill_information()->Print(); |
| 1240 } | 1256 } |
| 1241 } | 1257 } |
| 1242 #endif // DEBUG | 1258 #endif // DEBUG |
| 1243 | 1259 |
| 1244 | 1260 |
| 1245 // Support for statistics gathering for --heap-stats and --log-gc. | 1261 // Support for statistics gathering for --heap-stats and --log-gc. |
| 1246 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 1262 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
| 1247 void NewSpace::ClearHistograms() { | 1263 void NewSpace::ClearHistograms() { |
| 1248 for (int i = 0; i <= LAST_TYPE; i++) { | 1264 for (int i = 0; i <= LAST_TYPE; i++) { |
| 1249 allocated_histogram_[i].clear(); | 1265 allocated_histogram_[i].clear(); |
| 1250 promoted_histogram_[i].clear(); | 1266 promoted_histogram_[i].clear(); |
| 1251 } | 1267 } |
| 1252 } | 1268 } |
| 1253 | 1269 |
| 1254 // Because the copying collector does not touch garbage objects, we iterate | 1270 // Because the copying collector does not touch garbage objects, we iterate |
| 1255 // the new space before a collection to get a histogram of allocated objects. | 1271 // the new space before a collection to get a histogram of allocated objects. |
| 1256 // This only happens (1) when compiled with DEBUG and the --heap-stats flag is | 1272 // This only happens (1) when compiled with DEBUG and the --heap-stats flag is |
| 1257 // set, or when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc | 1273 // set, or when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc |
| 1258 // flag is set. | 1274 // flag is set. |
| 1259 void NewSpace::CollectStatistics() { | 1275 void NewSpace::CollectStatistics() { |
| 1260 ClearHistograms(); | 1276 ClearHistograms(); |
| 1261 SemiSpaceIterator it(this); | 1277 SemiSpaceIterator it(this); |
| 1262 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) | 1278 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) |
| 1263 RecordAllocation(obj); | 1279 RecordAllocation(obj); |
| 1264 } | 1280 } |
| 1265 | 1281 |
| 1266 | 1282 |
| 1267 #ifdef ENABLE_LOGGING_AND_PROFILING | 1283 #ifdef ENABLE_LOGGING_AND_PROFILING |
| 1268 static void DoReportStatistics(HistogramInfo* info, const char* description) { | 1284 static void DoReportStatistics(Isolate* isolate, |
| 1269 LOG(HeapSampleBeginEvent("NewSpace", description)); | 1285 HistogramInfo* info, const char* description) { |
| | 1286 LOG(isolate, HeapSampleBeginEvent("NewSpace", description)); |
| 1270 // Lump all the string types together. | 1287 // Lump all the string types together. |
| 1271 int string_number = 0; | 1288 int string_number = 0; |
| 1272 int string_bytes = 0; | 1289 int string_bytes = 0; |
| 1273 #define INCREMENT(type, size, name, camel_name) \ | 1290 #define INCREMENT(type, size, name, camel_name) \ |
| 1274 string_number += info[type].number(); \ | 1291 string_number += info[type].number(); \ |
| 1275 string_bytes += info[type].bytes(); | 1292 string_bytes += info[type].bytes(); |
| 1276 STRING_TYPE_LIST(INCREMENT) | 1293 STRING_TYPE_LIST(INCREMENT) |
| 1277 #undef INCREMENT | 1294 #undef INCREMENT |
| 1278 if (string_number > 0) { | 1295 if (string_number > 0) { |
| 1279 LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes)); | 1296 LOG(isolate, |
| | 1297 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes)); |
| 1280 } | 1298 } |
| 1281 | 1299 |
| 1282 // Then do the other types. | 1300 // Then do the other types. |
| 1283 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) { | 1301 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) { |
| 1284 if (info[i].number() > 0) { | 1302 if (info[i].number() > 0) { |
| 1285 LOG(HeapSampleItemEvent(info[i].name(), info[i].number(), | 1303 LOG(isolate, |
| | 1304 HeapSampleItemEvent(info[i].name(), info[i].number(), |
| 1286 info[i].bytes())); | 1305 info[i].bytes())); |
| 1287 } | 1306 } |
| 1288 } | 1307 } |
| 1289 LOG(HeapSampleEndEvent("NewSpace", description)); | 1308 LOG(isolate, HeapSampleEndEvent("NewSpace", description)); |
| 1290 } | 1309 } |
| 1291 #endif // ENABLE_LOGGING_AND_PROFILING | 1310 #endif // ENABLE_LOGGING_AND_PROFILING |
| 1292 | 1311 |
| 1293 | 1312 |
| 1294 void NewSpace::ReportStatistics() { | 1313 void NewSpace::ReportStatistics() { |
| 1295 #ifdef DEBUG | 1314 #ifdef DEBUG |
| 1296 if (FLAG_heap_stats) { | 1315 if (FLAG_heap_stats) { |
| 1297 float pct = static_cast<float>(Available()) / Capacity(); | 1316 float pct = static_cast<float>(Available()) / Capacity(); |
| 1298 PrintF(" capacity: %" V8_PTR_PREFIX "d" | 1317 PrintF(" capacity: %" V8_PTR_PREFIX "d" |
| 1299 ", available: %" V8_PTR_PREFIX "d, %%%d\n", | 1318 ", available: %" V8_PTR_PREFIX "d, %%%d\n", |
| 1300 Capacity(), Available(), static_cast<int>(pct*100)); | 1319 Capacity(), Available(), static_cast<int>(pct*100)); |
| 1301 PrintF("\n Object Histogram:\n"); | 1320 PrintF("\n Object Histogram:\n"); |
| 1302 for (int i = 0; i <= LAST_TYPE; i++) { | 1321 for (int i = 0; i <= LAST_TYPE; i++) { |
| 1303 if (allocated_histogram_[i].number() > 0) { | 1322 if (allocated_histogram_[i].number() > 0) { |
| 1304 PrintF(" %-34s%10d (%10d bytes)\n", | 1323 PrintF(" %-34s%10d (%10d bytes)\n", |
| 1305 allocated_histogram_[i].name(), | 1324 allocated_histogram_[i].name(), |
| 1306 allocated_histogram_[i].number(), | 1325 allocated_histogram_[i].number(), |
| 1307 allocated_histogram_[i].bytes()); | 1326 allocated_histogram_[i].bytes()); |
| 1308 } | 1327 } |
| 1309 } | 1328 } |
| 1310 PrintF("\n"); | 1329 PrintF("\n"); |
| 1311 } | 1330 } |
| 1312 #endif // DEBUG | 1331 #endif // DEBUG |
| 1313 | 1332 |
| 1314 #ifdef ENABLE_LOGGING_AND_PROFILING | 1333 #ifdef ENABLE_LOGGING_AND_PROFILING |
| 1315 if (FLAG_log_gc) { | 1334 if (FLAG_log_gc) { |
| 1316 DoReportStatistics(allocated_histogram_, "allocated"); | 1335 Isolate* isolate = ISOLATE; |
| 1317 DoReportStatistics(promoted_histogram_, "promoted"); | 1336 DoReportStatistics(isolate, allocated_histogram_, "allocated"); |
| | 1337 DoReportStatistics(isolate, promoted_histogram_, "promoted"); |
| 1318 } | 1338 } |
| 1319 #endif // ENABLE_LOGGING_AND_PROFILING | 1339 #endif // ENABLE_LOGGING_AND_PROFILING |
| 1320 } | 1340 } |
| 1321 | 1341 |
| 1322 | 1342 |
| 1323 void NewSpace::RecordAllocation(HeapObject* obj) { | 1343 void NewSpace::RecordAllocation(HeapObject* obj) { |
| 1324 InstanceType type = obj->map()->instance_type(); | 1344 InstanceType type = obj->map()->instance_type(); |
| 1325 ASSERT(0 <= type && type <= LAST_TYPE); | 1345 ASSERT(0 <= type && type <= LAST_TYPE); |
| 1326 allocated_histogram_[type].increment_number(1); | 1346 allocated_histogram_[type].increment_number(1); |
| 1327 allocated_histogram_[type].increment_bytes(obj->Size()); | 1347 allocated_histogram_[type].increment_bytes(obj->Size()); |
| (...skipping 17 matching lines...) | |
| 1345 ASSERT(IsAligned(size_in_bytes, kPointerSize)); | 1365 ASSERT(IsAligned(size_in_bytes, kPointerSize)); |
| 1346 | 1366 |
| 1347 // We write a map and possibly size information to the block. If the block | 1367 // We write a map and possibly size information to the block. If the block |
| 1348 // is big enough to be a FreeSpace with at least one extra word (the next | 1368 // is big enough to be a FreeSpace with at least one extra word (the next |
| 1349 // pointer), we set its map to be the free space map and its size to an | 1369 // pointer), we set its map to be the free space map and its size to an |
| 1350 // appropriate array length for the desired size from HeapObject::Size(). | 1370 // appropriate array length for the desired size from HeapObject::Size(). |
| 1351 // If the block is too small (e.g., one or two words) to hold both a size | 1371 // If the block is too small (e.g., one or two words) to hold both a size |
| 1352 // field and a next pointer, we give it a filler map that gives it the | 1372 // field and a next pointer, we give it a filler map that gives it the |
| 1353 // correct size. | 1373 // correct size. |
| 1354 if (size_in_bytes > FreeSpace::kHeaderSize) { | 1374 if (size_in_bytes > FreeSpace::kHeaderSize) { |
| 1355 set_map(Heap::raw_unchecked_free_space_map()); | 1375 set_map(HEAP->raw_unchecked_free_space_map()); |
| Erik Corry, 2011/04/20 20:07:40: Needs a todo. | |
| Vyacheslav Egorov (Chromium), 2011/04/24 11:24:08: Done. | |
| 1356 // Can't use FreeSpace::cast because it fails during deserialization. | 1376 // Can't use FreeSpace::cast because it fails during deserialization. |
| 1357 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); | 1377 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); |
| 1358 this_as_free_space->set_size(size_in_bytes); | 1378 this_as_free_space->set_size(size_in_bytes); |
| 1359 } else if (size_in_bytes == kPointerSize) { | 1379 } else if (size_in_bytes == kPointerSize) { |
| 1360 set_map(Heap::raw_unchecked_one_pointer_filler_map()); | 1380 set_map(HEAP->raw_unchecked_one_pointer_filler_map()); |
| 1361 } else if (size_in_bytes == 2 * kPointerSize) { | 1381 } else if (size_in_bytes == 2 * kPointerSize) { |
| 1362 set_map(Heap::raw_unchecked_two_pointer_filler_map()); | 1382 set_map(HEAP->raw_unchecked_two_pointer_filler_map()); |
| 1363 } else { | 1383 } else { |
| 1364 UNREACHABLE(); | 1384 UNREACHABLE(); |
| 1365 } | 1385 } |
| 1366 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during | 1386 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during |
| 1367 // deserialization because the free space map has not been set up yet. | 1387 // deserialization because the free space map has not been set up yet. |
| 1368 } | 1388 } |
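The tagging scheme above can be shown in miniature. In this standalone sketch the names (Tag, FreeBlock, kWordSize, kFreeSpaceHeader) are invented stand-ins rather than V8 types; the point is the three-way dispatch on block size:

    // Standalone sketch: tag a free block by size, as set_size does above.
    #include <cassert>
    #include <cstddef>

    enum Tag { kFreeSpace, kOneWordFiller, kTwoWordFiller };

    struct FreeBlock {
      Tag tag;          // stands in for the map word
      size_t size;      // meaningful only for kFreeSpace
      FreeBlock* next;  // only fits when the block has a spare word
    };

    const size_t kWordSize = sizeof(void*);
    // A FreeSpace needs a map word plus a size word; the next pointer
    // reuses the first body word, so the minimum is three words.
    const size_t kFreeSpaceHeader = 2 * kWordSize;

    void TagFreeBlock(FreeBlock* block, size_t size_in_bytes) {
      assert(size_in_bytes % kWordSize == 0);
      if (size_in_bytes > kFreeSpaceHeader) {
        block->tag = kFreeSpace;      // room for size field and next pointer
        block->size = size_in_bytes;
      } else if (size_in_bytes == kWordSize) {
        block->tag = kOneWordFiller;  // the map alone encodes the size
      } else if (size_in_bytes == 2 * kWordSize) {
        block->tag = kTwoWordFiller;
      } else {
        assert(false);                // UNREACHABLE in the original
      }
    }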
| 1369 | 1389 |
| 1370 | 1390 |
| 1371 FreeListNode* FreeListNode::next() { | 1391 FreeListNode* FreeListNode::next() { |
| 1372 ASSERT(IsFreeListNode(this)); | 1392 ASSERT(IsFreeListNode(this)); |
| 1373 if (map() == Heap::raw_unchecked_free_space_map()) { | 1393 if (map() == HEAP->raw_unchecked_free_space_map()) { |
| 1374 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); | 1394 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); |
| 1375 return reinterpret_cast<FreeListNode*>( | 1395 return reinterpret_cast<FreeListNode*>( |
| 1376 Memory::Address_at(address() + kNextOffset)); | 1396 Memory::Address_at(address() + kNextOffset)); |
| 1377 } else { | 1397 } else { |
| 1378 return reinterpret_cast<FreeListNode*>( | 1398 return reinterpret_cast<FreeListNode*>( |
| 1379 Memory::Address_at(address() + kPointerSize)); | 1399 Memory::Address_at(address() + kPointerSize)); |
| 1380 } | 1400 } |
| 1381 } | 1401 } |
| 1382 | 1402 |
| 1383 | 1403 |
| 1384 FreeListNode** FreeListNode::next_address() { | 1404 FreeListNode** FreeListNode::next_address() { |
| 1385 ASSERT(IsFreeListNode(this)); | 1405 ASSERT(IsFreeListNode(this)); |
| 1386 if (map() == Heap::raw_unchecked_free_space_map()) { | 1406 if (map() == HEAP->raw_unchecked_free_space_map()) { |
| 1387 ASSERT(Size() >= kNextOffset + kPointerSize); | 1407 ASSERT(Size() >= kNextOffset + kPointerSize); |
| 1388 return reinterpret_cast<FreeListNode**>(address() + kNextOffset); | 1408 return reinterpret_cast<FreeListNode**>(address() + kNextOffset); |
| 1389 } else { | 1409 } else { |
| 1390 return reinterpret_cast<FreeListNode**>(address() + kPointerSize); | 1410 return reinterpret_cast<FreeListNode**>(address() + kPointerSize); |
| 1391 } | 1411 } |
| 1392 } | 1412 } |
| 1393 | 1413 |
| 1394 | 1414 |
| 1395 void FreeListNode::set_next(FreeListNode* next) { | 1415 void FreeListNode::set_next(FreeListNode* next) { |
| 1396 ASSERT(IsFreeListNode(this)); | 1416 ASSERT(IsFreeListNode(this)); |
| 1397 // While we are booting the VM, the free space map will actually be null. So | 1417 // While we are booting the VM, the free space map will actually be null. So |
| 1398 // we have to make sure that we don't try to use it for anything at that | 1418 // we have to make sure that we don't try to use it for anything at that |
| 1399 // stage. | 1419 // stage. |
| 1400 if (map() == Heap::raw_unchecked_free_space_map()) { | 1420 if (map() == HEAP->raw_unchecked_free_space_map()) { |
| 1401 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); | 1421 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); |
| 1402 Memory::Address_at(address() + kNextOffset) = | 1422 Memory::Address_at(address() + kNextOffset) = |
| 1403 reinterpret_cast<Address>(next); | 1423 reinterpret_cast<Address>(next); |
| 1404 } else { | 1424 } else { |
| 1405 Memory::Address_at(address() + kPointerSize) = | 1425 Memory::Address_at(address() + kPointerSize) = |
| 1406 reinterpret_cast<Address>(next); | 1426 reinterpret_cast<Address>(next); |
| 1407 } | 1427 } |
| 1408 } | 1428 } |
| 1409 | 1429 |
| 1410 | 1430 |
| (...skipping 63 matching lines...) | |
| 1474 new_node_size = new_node->Size(); | 1494 new_node_size = new_node->Size(); |
| 1475 medium_list_ = new_node->next(); | 1495 medium_list_ = new_node->next(); |
| 1476 } else if (size_in_bytes <= kLargeAllocationMax && large_list_ != NULL) { | 1496 } else if (size_in_bytes <= kLargeAllocationMax && large_list_ != NULL) { |
| 1477 new_node = large_list_; | 1497 new_node = large_list_; |
| 1478 new_node_size = new_node->Size(); | 1498 new_node_size = new_node->Size(); |
| 1479 large_list_ = new_node->next(); | 1499 large_list_ = new_node->next(); |
| 1480 } else { | 1500 } else { |
| 1481 for (FreeListNode** cur = &huge_list_; | 1501 for (FreeListNode** cur = &huge_list_; |
| 1482 *cur != NULL; | 1502 *cur != NULL; |
| 1483 cur = (*cur)->next_address()) { | 1503 cur = (*cur)->next_address()) { |
| 1484 ASSERT((*cur)->map() == Heap::raw_unchecked_free_space_map()); | 1504 ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map()); |
| 1485 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur); | 1505 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur); |
| 1486 int size = cur_as_free_space->Size(); | 1506 int size = cur_as_free_space->Size(); |
| 1487 if (size >= size_in_bytes) { | 1507 if (size >= size_in_bytes) { |
| 1488 // Large enough node found. Unlink it from the list. | 1508 // Large enough node found. Unlink it from the list. |
| 1489 new_node = *cur; | 1509 new_node = *cur; |
| 1490 new_node_size = size; | 1510 new_node_size = size; |
| 1491 *cur = new_node->next(); | 1511 *cur = new_node->next(); |
| 1492 break; | 1512 break; |
| 1493 } | 1513 } |
| 1494 } | 1514 } |
| 1495 if (new_node == NULL) return NULL; | 1515 if (new_node == NULL) return NULL; |
| 1496 } | 1516 } |
| 1497 | 1517 |
| 1498 available_ -= new_node_size; | 1518 available_ -= new_node_size; |
| 1499 ASSERT(IsVeryLong() || available_ == SumFreeLists()); | 1519 ASSERT(IsVeryLong() || available_ == SumFreeLists()); |
| 1500 | 1520 |
| 1501 int old_linear_size = owner_->limit() - owner_->top(); | 1521 int old_linear_size = owner_->limit() - owner_->top(); |
| 1502 // Mark the old linear allocation area with a free space map so it can be | 1522 // Mark the old linear allocation area with a free space map so it can be |
| 1503 // skipped when scanning the heap. This also puts it back in the free list | 1523 // skipped when scanning the heap. This also puts it back in the free list |
| 1504 // if it is big enough. | 1524 // if it is big enough. |
| 1505 owner_->Free(owner_->top(), old_linear_size); | 1525 owner_->Free(owner_->top(), old_linear_size); |
| 1506 IncrementalMarking::Step(size_in_bytes - old_linear_size); | 1526 // TODO(gc) ISOLATES MERGE |
| 1527 HEAP->incremental_marking()->Step(size_in_bytes - old_linear_size); | |
| 1507 | 1528 |
| 1508 ASSERT(new_node_size - size_in_bytes >= 0); // New linear size. | 1529 ASSERT(new_node_size - size_in_bytes >= 0); // New linear size. |
| 1509 | 1530 |
| 1510 const int kThreshold = IncrementalMarking::kAllocatedThreshold; | 1531 const int kThreshold = IncrementalMarking::kAllocatedThreshold; |
| 1511 | 1532 |
| 1512 // Memory in the linear allocation area is counted as allocated. We may free | 1533 // Memory in the linear allocation area is counted as allocated. We may free |
| 1513 // a little of this again immediately - see below. | 1534 // a little of this again immediately - see below. |
| 1514 owner_->Allocate(new_node_size); | 1535 owner_->Allocate(new_node_size); |
| 1515 | 1536 |
| 1516 if (new_node_size - size_in_bytes > kThreshold && | 1537 if (new_node_size - size_in_bytes > kThreshold && |
| 1517 IncrementalMarking::state() == IncrementalMarking::MARKING && | 1538 HEAP->incremental_marking()->IsMarking() && |
| 1518 FLAG_incremental_marking_steps) { | 1539 FLAG_incremental_marking_steps) { |
| 1519 // We don't want to give too large linear areas to the allocator while | 1540 // We don't want to give too large linear areas to the allocator while |
| 1520 // incremental marking is going on, because we won't check again whether | 1541 // incremental marking is going on, because we won't check again whether |
| 1521 // we want to do another increment until the linear area is used up. | 1542 // we want to do another increment until the linear area is used up. |
| 1522 owner_->Free(new_node->address() + size_in_bytes + kThreshold, | 1543 owner_->Free(new_node->address() + size_in_bytes + kThreshold, |
| 1523 new_node_size - size_in_bytes - kThreshold); | 1544 new_node_size - size_in_bytes - kThreshold); |
| 1524 owner_->SetTop(new_node->address() + size_in_bytes, | 1545 owner_->SetTop(new_node->address() + size_in_bytes, |
| 1525 new_node->address() + size_in_bytes + kThreshold); | 1546 new_node->address() + size_in_bytes + kThreshold); |
| 1526 } else { | 1547 } else { |
| 1527 // Normally we give the rest of the node to the allocator as its new | 1548 // Normally we give the rest of the node to the allocator as its new |
| 1528 // linear allocation area. | 1549 // linear allocation area. |
| 1529 owner_->SetTop(new_node->address() + size_in_bytes, | 1550 owner_->SetTop(new_node->address() + size_in_bytes, |
| 1530 new_node->address() + new_node_size); | 1551 new_node->address() + new_node_size); |
| 1531 } | 1552 } |
| 1532 | 1553 |
| 1533 return new_node; | 1554 return new_node; |
| 1534 } | 1555 } |
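The allocation path above combines segregated exact-fit lists with a first-fit scan of the huge list. A compilable model of just that dispatch, with invented thresholds and STL lists standing in for heap-resident FreeListNodes:

    // Sketch of the size-class dispatch in OldSpaceFreeList::Allocate.
    #include <cstddef>
    #include <list>

    struct Node { size_t size; };

    class FreeListModel {
     public:
      // Exact-fit classes are consulted first; anything bigger falls
      // through to a first-fit scan of the huge list, as above.
      Node* Allocate(size_t size_in_bytes) {
        if (size_in_bytes <= kSmallMax && !small_.empty()) return Take(&small_);
        if (size_in_bytes <= kMediumMax && !medium_.empty()) return Take(&medium_);
        if (size_in_bytes <= kLargeMax && !large_.empty()) return Take(&large_);
        for (std::list<Node*>::iterator it = huge_.begin();
             it != huge_.end();
             ++it) {
          if ((*it)->size >= size_in_bytes) {  // first node large enough
            Node* found = *it;
            huge_.erase(it);  // unlink, like *cur = new_node->next()
            return found;
          }
        }
        return NULL;  // caller must grow the space or trigger a GC
      }

     private:
      static const size_t kSmallMax = 32;     // invented thresholds
      static const size_t kMediumMax = 256;
      static const size_t kLargeMax = 16384;
      Node* Take(std::list<Node*>* l) {
        Node* n = l->front();
        l->pop_front();
        return n;
      }
      std::list<Node*> small_, medium_, large_, huge_;
    };

First-fit on the huge list keeps the common small-allocation path O(1) while still serving oversized requests from the same pool.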
| 1535 | 1556 |
| 1536 | 1557 |
| 1537 #ifdef DEBUG | 1558 #ifdef DEBUG |
| 1538 intptr_t OldSpaceFreeList::SumFreeList(FreeListNode* cur) { | 1559 intptr_t OldSpaceFreeList::SumFreeList(FreeListNode* cur) { |
| 1539 intptr_t sum = 0; | 1560 intptr_t sum = 0; |
| 1540 while (cur != NULL) { | 1561 while (cur != NULL) { |
| 1541 ASSERT(cur->map() == Heap::raw_unchecked_free_space_map()); | 1562 ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map()); |
| 1542 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur); | 1563 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur); |
| 1543 sum += cur_as_free_space->Size(); | 1564 sum += cur_as_free_space->Size(); |
| 1544 cur = cur->next(); | 1565 cur = cur->next(); |
| 1545 } | 1566 } |
| 1546 return sum; | 1567 return sum; |
| 1547 } | 1568 } |
| 1548 | 1569 |
| 1549 | 1570 |
| 1550 static const int kVeryLongFreeList = 500; | 1571 static const int kVeryLongFreeList = 500; |
| 1551 | 1572 |
| (...skipping 85 matching lines...) | |
| 1637 | 1658 |
| 1638 SetTop(new_area->address(), new_area->address() + size_in_bytes); | 1659 SetTop(new_area->address(), new_area->address() + size_in_bytes); |
| 1639 Allocate(size_in_bytes); | 1660 Allocate(size_in_bytes); |
| 1640 return true; | 1661 return true; |
| 1641 } | 1662 } |
| 1642 | 1663 |
| 1643 | 1664 |
| 1644 // You have to call this last, since the implementation from PagedSpace | 1665 // You have to call this last, since the implementation from PagedSpace |
| 1645 // doesn't know that memory was 'promised' to large object space. | 1666 // doesn't know that memory was 'promised' to large object space. |
| 1646 bool LargeObjectSpace::ReserveSpace(int bytes) { | 1667 bool LargeObjectSpace::ReserveSpace(int bytes) { |
| 1647 return Heap::OldGenerationSpaceAvailable() >= bytes; | 1668 return heap()->OldGenerationSpaceAvailable() >= bytes; |
| 1648 } | 1669 } |
| 1649 | 1670 |
| 1650 | 1671 |
| 1651 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { | 1672 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { |
| 1652 // Allocation in this space has failed. | 1673 // Allocation in this space has failed. |
| 1653 | 1674 |
| 1654 // Free list allocation failed and there is no next page. Fail if we have | 1675 // Free list allocation failed and there is no next page. Fail if we have |
| 1655 // hit the old generation size limit that should cause a garbage | 1676 // hit the old generation size limit that should cause a garbage |
| 1656 // collection. | 1677 // collection. |
| 1657 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { | 1678 if (!heap()->always_allocate() && |
| 1679 heap()->OldGenerationAllocationLimitReached()) { | |
| 1658 return NULL; | 1680 return NULL; |
| 1659 } | 1681 } |
| 1660 | 1682 |
| 1661 // Try to expand the space and allocate in the new next page. | 1683 // Try to expand the space and allocate in the new next page. |
| 1662 if (Expand()) { | 1684 if (Expand()) { |
| 1663 return free_list_.Allocate(size_in_bytes); | 1685 return free_list_.Allocate(size_in_bytes); |
| 1664 } | 1686 } |
| 1665 | 1687 |
| 1666 // Finally, fail. | 1688 // Finally, fail. |
| 1667 return NULL; | 1689 return NULL; |
| 1668 } | 1690 } |
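Stripped of V8 types, the slow path is a small policy function: refuse to grow when a GC is due, otherwise expand and retry. All helpers and globals below are stubbed placeholders, not real heap APIs:

    // Compilable reduction of SlowAllocateRaw's policy decisions.
    #include <cstddef>

    static bool always_allocate = false;  // hypothetical heap state
    static bool limit_reached = false;
    static bool can_expand = true;

    static void* FreeListAllocate(int) { return NULL; }  // stub retry hook

    void* SlowAllocate(int size_in_bytes) {
      // Fail early so the caller can run a GC: expanding past the
      // old-generation limit would only defer an unavoidable pause.
      if (!always_allocate && limit_reached) return NULL;
      // Otherwise map a fresh page and retry from the free list.
      if (can_expand) return FreeListAllocate(size_in_bytes);
      return NULL;  // expansion failed: caller must GC or report OOM
    }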
| 1669 | 1691 |
| 1670 | 1692 |
| 1671 #ifdef DEBUG | 1693 #ifdef DEBUG |
| 1672 struct CommentStatistic { | |
| 1673 const char* comment; | |
| 1674 int size; | |
| 1675 int count; | |
| 1676 void Clear() { | |
| 1677 comment = NULL; | |
| 1678 size = 0; | |
| 1679 count = 0; | |
| 1680 } | |
| 1681 }; | |
| 1682 | |
| 1683 | |
| 1684 // must be small, since an iteration is used for lookup | |
| 1685 const int kMaxComments = 64; | |
| 1686 static CommentStatistic comments_statistics[kMaxComments+1]; | |
| 1687 | |
| 1688 | |
| 1689 void PagedSpace::ReportCodeStatistics() { | 1694 void PagedSpace::ReportCodeStatistics() { |
| 1695 Isolate* isolate = Isolate::Current(); | |
| 1696 CommentStatistic* comments_statistics = | |
| 1697 isolate->paged_space_comments_statistics(); | |
| 1690 ReportCodeKindStatistics(); | 1698 ReportCodeKindStatistics(); |
| 1691 PrintF("Code comment statistics (\" [ comment-txt : size/ " | 1699 PrintF("Code comment statistics (\" [ comment-txt : size/ " |
| 1692 "count (average)\"):\n"); | 1700 "count (average)\"):\n"); |
| 1693 for (int i = 0; i <= kMaxComments; i++) { | 1701 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) { |
| 1694 const CommentStatistic& cs = comments_statistics[i]; | 1702 const CommentStatistic& cs = comments_statistics[i]; |
| 1695 if (cs.size > 0) { | 1703 if (cs.size > 0) { |
| 1696 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count, | 1704 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count, |
| 1697 cs.size/cs.count); | 1705 cs.size/cs.count); |
| 1698 } | 1706 } |
| 1699 } | 1707 } |
| 1700 PrintF("\n"); | 1708 PrintF("\n"); |
| 1701 } | 1709 } |
| 1702 | 1710 |
| 1703 | 1711 |
| 1704 void PagedSpace::ResetCodeStatistics() { | 1712 void PagedSpace::ResetCodeStatistics() { |
| 1713 Isolate* isolate = Isolate::Current(); | |
| 1714 CommentStatistic* comments_statistics = | |
| 1715 isolate->paged_space_comments_statistics(); | |
| 1705 ClearCodeKindStatistics(); | 1716 ClearCodeKindStatistics(); |
| 1706 for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear(); | 1717 for (int i = 0; i < CommentStatistic::kMaxComments; i++) { |
| 1707 comments_statistics[kMaxComments].comment = "Unknown"; | 1718 comments_statistics[i].Clear(); |
| 1708 comments_statistics[kMaxComments].size = 0; | 1719 } |
| 1709 comments_statistics[kMaxComments].count = 0; | 1720 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown"; |
| 1721 comments_statistics[CommentStatistic::kMaxComments].size = 0; | |
| 1722 comments_statistics[CommentStatistic::kMaxComments].count = 0; | |
| 1710 } | 1723 } |
| 1711 | 1724 |
| 1712 | 1725 |
| 1713 // Adds comment to 'comment_statistics' table. Performance OK sa long as | 1726 // Adds comment to 'comment_statistics' table. Performance OK as long as |
| 1714 // 'kMaxComments' is small | 1727 // 'kMaxComments' is small |
| 1715 static void EnterComment(const char* comment, int delta) { | 1728 static void EnterComment(Isolate* isolate, const char* comment, int delta) { |
| 1729 CommentStatistic* comments_statistics = | |
| 1730 isolate->paged_space_comments_statistics(); | |
| 1716 // Do not count empty comments | 1731 // Do not count empty comments |
| 1717 if (delta <= 0) return; | 1732 if (delta <= 0) return; |
| 1718 CommentStatistic* cs = &comments_statistics[kMaxComments]; | 1733 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments]; |
| 1719 // Search for a free or matching entry in 'comments_statistics': 'cs' | 1734 // Search for a free or matching entry in 'comments_statistics': 'cs' |
| 1720 // points to result. | 1735 // points to result. |
| 1721 for (int i = 0; i < kMaxComments; i++) { | 1736 for (int i = 0; i < CommentStatistic::kMaxComments; i++) { |
| 1722 if (comments_statistics[i].comment == NULL) { | 1737 if (comments_statistics[i].comment == NULL) { |
| 1723 cs = &comments_statistics[i]; | 1738 cs = &comments_statistics[i]; |
| 1724 cs->comment = comment; | 1739 cs->comment = comment; |
| 1725 break; | 1740 break; |
| 1726 } else if (strcmp(comments_statistics[i].comment, comment) == 0) { | 1741 } else if (strcmp(comments_statistics[i].comment, comment) == 0) { |
| 1727 cs = &comments_statistics[i]; | 1742 cs = &comments_statistics[i]; |
| 1728 break; | 1743 break; |
| 1729 } | 1744 } |
| 1730 } | 1745 } |
| 1731 // Update entry for 'comment' | 1746 // Update entry for 'comment' |
| 1732 cs->size += delta; | 1747 cs->size += delta; |
| 1733 cs->count += 1; | 1748 cs->count += 1; |
| 1734 } | 1749 } |
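EnterComment's table is a fixed array searched linearly, with the final slot acting as the "Unknown" overflow bucket. A self-contained model, where CommentStat, kMax, and Enter are invented names:

    // Fixed-capacity stats table with linear search and overflow slot.
    #include <cstring>

    struct CommentStat { const char* comment; int size; int count; };
    static const int kMax = 64;          // plays kMaxComments' role
    static CommentStat stats[kMax + 1];  // slot kMax catches overflow

    void Enter(const char* comment, int delta) {
      if (delta <= 0) return;            // ignore empty comments
      CommentStat* cs = &stats[kMax];    // default: overflow bucket
      for (int i = 0; i < kMax; i++) {
        if (stats[i].comment == 0) {     // free slot: claim it
          cs = &stats[i];
          cs->comment = comment;
          break;
        } else if (strcmp(stats[i].comment, comment) == 0) {
          cs = &stats[i];                // existing entry: accumulate
          break;
        }
      }
      cs->size += delta;
      cs->count += 1;
    }

The overflow bucket guarantees Enter never fails, at the cost of lumping everything past the table's capacity together as "Unknown".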
| 1735 | 1750 |
| 1736 | 1751 |
| 1737 // Call for each nested comment start (start marked with '[ xxx', end marked | 1752 // Call for each nested comment start (start marked with '[ xxx', end marked |
| 1738 // with ']'). RelocIterator 'it' must point to a comment reloc info. | 1753 // with ']'). RelocIterator 'it' must point to a comment reloc info. |
| 1739 static void CollectCommentStatistics(RelocIterator* it) { | 1754 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) { |
| 1740 ASSERT(!it->done()); | 1755 ASSERT(!it->done()); |
| 1741 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT); | 1756 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT); |
| 1742 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data()); | 1757 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data()); |
| 1743 if (tmp[0] != '[') { | 1758 if (tmp[0] != '[') { |
| 1744 // Not a nested comment; skip | 1759 // Not a nested comment; skip |
| 1745 return; | 1760 return; |
| 1746 } | 1761 } |
| 1747 | 1762 |
| 1748 // Search for end of nested comment or a new nested comment | 1763 // Search for end of nested comment or a new nested comment |
| 1749 const char* const comment_txt = | 1764 const char* const comment_txt = |
| 1750 reinterpret_cast<const char*>(it->rinfo()->data()); | 1765 reinterpret_cast<const char*>(it->rinfo()->data()); |
| 1751 const byte* prev_pc = it->rinfo()->pc(); | 1766 const byte* prev_pc = it->rinfo()->pc(); |
| 1752 int flat_delta = 0; | 1767 int flat_delta = 0; |
| 1753 it->next(); | 1768 it->next(); |
| 1754 while (true) { | 1769 while (true) { |
| 1755 // All nested comments must be terminated properly, and therefore exit | 1770 // All nested comments must be terminated properly, and therefore exit |
| 1756 // from loop. | 1771 // from loop. |
| 1757 ASSERT(!it->done()); | 1772 ASSERT(!it->done()); |
| 1758 if (it->rinfo()->rmode() == RelocInfo::COMMENT) { | 1773 if (it->rinfo()->rmode() == RelocInfo::COMMENT) { |
| 1759 const char* const txt = | 1774 const char* const txt = |
| 1760 reinterpret_cast<const char*>(it->rinfo()->data()); | 1775 reinterpret_cast<const char*>(it->rinfo()->data()); |
| 1761 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc); | 1776 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc); |
| 1762 if (txt[0] == ']') break; // End of nested comment | 1777 if (txt[0] == ']') break; // End of nested comment |
| 1763 // A new comment | 1778 // A new comment |
| 1764 CollectCommentStatistics(it); | 1779 CollectCommentStatistics(isolate, it); |
| 1765 // Skip code that was covered with previous comment | 1780 // Skip code that was covered with previous comment |
| 1766 prev_pc = it->rinfo()->pc(); | 1781 prev_pc = it->rinfo()->pc(); |
| 1767 } | 1782 } |
| 1768 it->next(); | 1783 it->next(); |
| 1769 } | 1784 } |
| 1770 EnterComment(comment_txt, flat_delta); | 1785 EnterComment(isolate, comment_txt, flat_delta); |
| 1771 } | 1786 } |
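The recursion above credits instruction bytes to the innermost enclosing comment. The toy below replays that walk over a flat token stream; Token, Record, and Collect are invented stand-ins for RelocInfo comments, EnterComment, and the function above:

    // Toy replay of the nested-comment walk: '[' opens a region, ']'
    // closes it, and bytes between markers belong to the innermost
    // open comment.
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Token { char kind; int pc; };  // kind is '[' or ']'

    static void Record(int pc, int flat) {  // plays EnterComment's role
      std::printf("comment opened at %d: %d flat bytes\n", pc, flat);
    }

    // Consumes tokens from *i (just past an opening '[') to the matching
    // ']', returning the bytes not covered by nested comments.
    static int Collect(const std::vector<Token>& t, size_t* i, int start_pc) {
      int flat = 0;
      int prev_pc = start_pc;
      while (*i < t.size()) {
        const Token& tok = t[(*i)++];
        flat += tok.pc - prev_pc;          // bytes since the last marker
        if (tok.kind == ']') return flat;  // this nesting level is done
        Record(tok.pc, Collect(t, i, tok.pc));  // recurse into '[' region
        prev_pc = t[*i - 1].pc;            // resume after the nested ']'
      }
      return flat;  // unterminated input; the real code ASSERTs instead
    }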
| 1772 | 1787 |
| 1773 | 1788 |
| 1774 // Collects code size statistics: | 1789 // Collects code size statistics: |
| 1775 // - by code kind | 1790 // - by code kind |
| 1776 // - by code comment | 1791 // - by code comment |
| 1777 void PagedSpace::CollectCodeStatistics() { | 1792 void PagedSpace::CollectCodeStatistics() { |
| 1793 Isolate* isolate = heap()->isolate(); | |
| 1778 HeapObjectIterator obj_it(this); | 1794 HeapObjectIterator obj_it(this); |
| 1779 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { | 1795 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { |
| 1780 if (obj->IsCode()) { | 1796 if (obj->IsCode()) { |
| 1781 Code* code = Code::cast(obj); | 1797 Code* code = Code::cast(obj); |
| 1782 code_kind_statistics[code->kind()] += code->Size(); | 1798 isolate->code_kind_statistics()[code->kind()] += code->Size(); |
| 1783 RelocIterator it(code); | 1799 RelocIterator it(code); |
| 1784 int delta = 0; | 1800 int delta = 0; |
| 1785 const byte* prev_pc = code->instruction_start(); | 1801 const byte* prev_pc = code->instruction_start(); |
| 1786 while (!it.done()) { | 1802 while (!it.done()) { |
| 1787 if (it.rinfo()->rmode() == RelocInfo::COMMENT) { | 1803 if (it.rinfo()->rmode() == RelocInfo::COMMENT) { |
| 1788 delta += static_cast<int>(it.rinfo()->pc() - prev_pc); | 1804 delta += static_cast<int>(it.rinfo()->pc() - prev_pc); |
| 1789 CollectCommentStatistics(&it); | 1805 CollectCommentStatistics(isolate, &it); |
| 1790 prev_pc = it.rinfo()->pc(); | 1806 prev_pc = it.rinfo()->pc(); |
| 1791 } | 1807 } |
| 1792 it.next(); | 1808 it.next(); |
| 1793 } | 1809 } |
| 1794 | 1810 |
| 1795 ASSERT(code->instruction_start() <= prev_pc && | 1811 ASSERT(code->instruction_start() <= prev_pc && |
| 1796 prev_pc <= code->instruction_end()); | 1812 prev_pc <= code->instruction_end()); |
| 1797 delta += static_cast<int>(code->instruction_end() - prev_pc); | 1813 delta += static_cast<int>(code->instruction_end() - prev_pc); |
| 1798 EnterComment("NoComment", delta); | 1814 EnterComment(isolate, "NoComment", delta); |
| 1799 } | 1815 } |
| 1800 } | 1816 } |
| 1801 } | 1817 } |
| 1802 | 1818 |
| 1803 | 1819 |
| 1804 void PagedSpace::ReportStatistics() { | 1820 void PagedSpace::ReportStatistics() { |
| 1805 int pct = static_cast<int>(Available() * 100 / Capacity()); | 1821 int pct = static_cast<int>(Available() * 100 / Capacity()); |
| 1806 PrintF(" capacity: %" V8_PTR_PREFIX "d" | 1822 PrintF(" capacity: %" V8_PTR_PREFIX "d" |
| 1807 ", waste: %" V8_PTR_PREFIX "d" | 1823 ", waste: %" V8_PTR_PREFIX "d" |
| 1808 ", available: %" V8_PTR_PREFIX "d, %%%d\n", | 1824 ", available: %" V8_PTR_PREFIX "d, %%%d\n", |
| (...skipping 45 matching lines...) | |
| 1854 #endif | 1870 #endif |
| 1855 | 1871 |
| 1856 | 1872 |
| 1857 // ----------------------------------------------------------------------------- | 1873 // ----------------------------------------------------------------------------- |
| 1858 // GlobalPropertyCellSpace implementation | 1874 // GlobalPropertyCellSpace implementation |
| 1859 | 1875 |
| 1860 #ifdef DEBUG | 1876 #ifdef DEBUG |
| 1861 void CellSpace::VerifyObject(HeapObject* object) { | 1877 void CellSpace::VerifyObject(HeapObject* object) { |
| 1862 // The object should be a global object property cell or a free-list node. | 1878 // The object should be a global object property cell or a free-list node. |
| 1863 ASSERT(object->IsJSGlobalPropertyCell() || | 1879 ASSERT(object->IsJSGlobalPropertyCell() || |
| 1864 object->map() == Heap::two_pointer_filler_map()); | 1880 object->map() == heap()->two_pointer_filler_map()); |
| 1865 } | 1881 } |
| 1866 #endif | 1882 #endif |
| 1867 | 1883 |
| 1868 | 1884 |
| 1869 // ----------------------------------------------------------------------------- | 1885 // ----------------------------------------------------------------------------- |
| 1870 // LargeObjectIterator | 1886 // LargeObjectIterator |
| 1871 | 1887 |
| 1872 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { | 1888 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { |
| 1873 current_ = space->first_page_; | 1889 current_ = space->first_page_; |
| 1874 size_func_ = NULL; | 1890 size_func_ = NULL; |
| (...skipping 12 matching lines...) | |
| 1887 | 1903 |
| 1888 HeapObject* object = current_->GetObject(); | 1904 HeapObject* object = current_->GetObject(); |
| 1889 current_ = current_->next_page(); | 1905 current_ = current_->next_page(); |
| 1890 return object; | 1906 return object; |
| 1891 } | 1907 } |
| 1892 | 1908 |
| 1893 | 1909 |
| 1894 // ----------------------------------------------------------------------------- | 1910 // ----------------------------------------------------------------------------- |
| 1895 // LargeObjectSpace | 1911 // LargeObjectSpace |
| 1896 | 1912 |
| 1897 LargeObjectSpace::LargeObjectSpace(AllocationSpace id) | 1913 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id) |
| 1898 : Space(id, NOT_EXECUTABLE), // Managed on a per-allocation basis | 1914 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis |
| 1899 first_page_(NULL), | 1915 first_page_(NULL), |
| 1900 size_(0), | 1916 size_(0), |
| 1901 page_count_(0), | 1917 page_count_(0), |
| 1902 objects_size_(0) {} | 1918 objects_size_(0) {} |
| 1903 | 1919 |
| 1904 | 1920 |
| 1905 bool LargeObjectSpace::Setup() { | 1921 bool LargeObjectSpace::Setup() { |
| 1906 first_page_ = NULL; | 1922 first_page_ = NULL; |
| 1907 size_ = 0; | 1923 size_ = 0; |
| 1908 page_count_ = 0; | 1924 page_count_ = 0; |
| 1909 objects_size_ = 0; | 1925 objects_size_ = 0; |
| 1910 return true; | 1926 return true; |
| 1911 } | 1927 } |
| 1912 | 1928 |
| 1913 | 1929 |
| 1914 void LargeObjectSpace::TearDown() { | 1930 void LargeObjectSpace::TearDown() { |
| 1915 while (first_page_ != NULL) { | 1931 while (first_page_ != NULL) { |
| 1916 LargePage* page = first_page_; | 1932 LargePage* page = first_page_; |
| 1917 first_page_ = first_page_->next_page(); | 1933 first_page_ = first_page_->next_page(); |
| 1918 | 1934 |
| 1919 MemoryAllocator::Free(page); | 1935 heap()->isolate()->memory_allocator()->Free(page); |
| 1920 } | 1936 } |
| 1921 | 1937 |
| 1922 size_ = 0; | 1938 size_ = 0; |
| 1923 page_count_ = 0; | 1939 page_count_ = 0; |
| 1924 objects_size_ = 0; | 1940 objects_size_ = 0; |
| 1925 } | 1941 } |
| 1926 | 1942 |
| 1927 | 1943 |
| 1928 #ifdef ENABLE_HEAP_PROTECTION | 1944 #ifdef ENABLE_HEAP_PROTECTION |
| 1929 | 1945 |
| 1930 void LargeObjectSpace::Protect() { | 1946 void LargeObjectSpace::Protect() { |
| 1931 LargeObjectChunk* chunk = first_chunk_; | 1947 LargeObjectChunk* chunk = first_chunk_; |
| 1932 while (chunk != NULL) { | 1948 while (chunk != NULL) { |
| 1933 MemoryAllocator::Protect(chunk->address(), chunk->size()); | 1949 heap()->isolate()->memory_allocator()->Protect(chunk->address(), |
| 1950 chunk->size()); | |
| 1934 chunk = chunk->next(); | 1951 chunk = chunk->next(); |
| 1935 } | 1952 } |
| 1936 } | 1953 } |
| 1937 | 1954 |
| 1938 | 1955 |
| 1939 void LargeObjectSpace::Unprotect() { | 1956 void LargeObjectSpace::Unprotect() { |
| 1940 LargeObjectChunk* chunk = first_chunk_; | 1957 LargeObjectChunk* chunk = first_chunk_; |
| 1941 while (chunk != NULL) { | 1958 while (chunk != NULL) { |
| 1942 bool is_code = chunk->GetObject()->IsCode(); | 1959 bool is_code = chunk->GetObject()->IsCode(); |
| 1943 MemoryAllocator::Unprotect(chunk->address(), chunk->size(), | 1960 heap()->isolate()->memory_allocator()->Unprotect(chunk->address(), |
| 1944 is_code ? EXECUTABLE : NOT_EXECUTABLE); | 1961 chunk->size(), is_code ? EXECUTABLE : NOT_EXECUTABLE); |
| 1945 chunk = chunk->next(); | 1962 chunk = chunk->next(); |
| 1946 } | 1963 } |
| 1947 } | 1964 } |
| 1948 | 1965 |
| 1949 #endif | 1966 #endif |
| 1950 | 1967 |
| 1951 MaybeObject* LargeObjectSpace::AllocateRawInternal(int object_size, | 1968 MaybeObject* LargeObjectSpace::AllocateRawInternal(int object_size, |
| 1952 Executability executable) { | 1969 Executability executable) { |
| 1953 // Check if we want to force a GC before growing the old space further. | 1970 // Check if we want to force a GC before growing the old space further. |
| 1954 // If so, fail the allocation. | 1971 // If so, fail the allocation. |
| 1955 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { | 1972 if (!heap()->always_allocate() && |
| 1973 heap()->OldGenerationAllocationLimitReached()) { | |
| 1956 return Failure::RetryAfterGC(identity()); | 1974 return Failure::RetryAfterGC(identity()); |
| 1957 } | 1975 } |
| 1958 | 1976 |
| 1959 LargePage* page = MemoryAllocator::AllocateLargePage(object_size, | 1977 // TODO(gc) isolates merge |
| 1960 executable, | 1978 LargePage* page = heap()->isolate()->memory_allocator()-> |
| 1961 this); | 1979 AllocateLargePage(object_size, executable, this); |
| 1962 if (page == NULL) return Failure::RetryAfterGC(identity()); | 1980 if (page == NULL) return Failure::RetryAfterGC(identity()); |
| 1963 ASSERT(page->body_size() >= object_size); | 1981 ASSERT(page->body_size() >= object_size); |
| 1964 | 1982 |
| 1965 size_ += static_cast<int>(page->size()); | 1983 size_ += static_cast<int>(page->size()); |
| 1966 objects_size_ += object_size; | 1984 objects_size_ += object_size; |
| 1967 page_count_++; | 1985 page_count_++; |
| 1968 page->set_next_page(first_page_); | 1986 page->set_next_page(first_page_); |
| 1969 first_page_ = page; | 1987 first_page_ = page; |
| 1970 | 1988 |
| 1971 | 1989 |
| 1972 IncrementalMarking::Step(object_size); | 1990 heap()->incremental_marking()->Step(object_size); |
| 1973 return page->GetObject(); | 1991 return page->GetObject(); |
| 1974 } | 1992 } |
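Each large object gets a page of its own, prepended to an intrusive list while the space's counters are updated. A minimal model of that bookkeeping, with malloc standing in for the memory allocator and an invented overhead constant:

    // Model: one page per large object, intrusive list, running totals.
    #include <cstddef>
    #include <cstdlib>

    struct LargePageModel {
      LargePageModel* next;
      size_t page_size;
      size_t object_size;
    };

    struct LargeSpaceModel {
      LargePageModel* first;
      size_t size;          // total committed bytes, like size_
      size_t objects_size;  // payload bytes, like objects_size_
      int page_count;

      LargePageModel* AllocatePage(size_t object_size) {
        LargePageModel* p =
            static_cast<LargePageModel*>(malloc(sizeof(LargePageModel)));
        if (p == 0) return 0;               // maps to RetryAfterGC above
        p->page_size = object_size + 4096;  // header/alignment overhead
        p->object_size = object_size;
        p->next = first;                    // prepend, like set_next_page
        first = p;
        size += p->page_size;
        objects_size += object_size;
        page_count++;
        return p;
      }
    };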
| 1975 | 1993 |
| 1976 | 1994 |
| 1977 MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) { | 1995 MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) { |
| 1978 ASSERT(0 < size_in_bytes); | 1996 ASSERT(0 < size_in_bytes); |
| 1979 return AllocateRawInternal(size_in_bytes, EXECUTABLE); | 1997 return AllocateRawInternal(size_in_bytes, EXECUTABLE); |
| 1980 } | 1998 } |
| 1981 | 1999 |
| 1982 | 2000 |
| (...skipping 44 matching lines...) | |
| 2027 for (HeapObject* object = it.next(); object != NULL; object = it.next()) { | 2045 for (HeapObject* object = it.next(); object != NULL; object = it.next()) { |
| 2028 // We only have code, sequential strings, or fixed arrays in large | 2046 // We only have code, sequential strings, or fixed arrays in large |
| 2029 // object space, and only fixed arrays can possibly contain pointers to | 2047 // object space, and only fixed arrays can possibly contain pointers to |
| 2030 // the young generation. | 2048 // the young generation. |
| 2031 if (object->IsFixedArray()) { | 2049 if (object->IsFixedArray()) { |
| 2032 // TODO(gc): we can no longer assume that LargePage is bigger than normal | 2050 // TODO(gc): we can no longer assume that LargePage is bigger than normal |
| 2033 // page. | 2051 // page. |
| 2034 | 2052 |
| 2035 Address start = object->address(); | 2053 Address start = object->address(); |
| 2036 Address object_end = start + object->Size(); | 2054 Address object_end = start + object->Size(); |
| 2037 Heap::IteratePointersToNewSpace(start, object_end, copy_object); | 2055 heap()->IteratePointersToNewSpace(heap(), start, object_end, copy_object); |
| 2038 } | 2056 } |
| 2039 } | 2057 } |
| 2040 } | 2058 } |
| 2041 | 2059 |
| 2042 | 2060 |
| 2043 void LargeObjectSpace::FreeUnmarkedObjects() { | 2061 void LargeObjectSpace::FreeUnmarkedObjects() { |
| 2044 LargePage* previous = NULL; | 2062 LargePage* previous = NULL; |
| 2045 LargePage* current = first_page_; | 2063 LargePage* current = first_page_; |
| 2046 while (current != NULL) { | 2064 while (current != NULL) { |
| 2047 HeapObject* object = current->GetObject(); | 2065 HeapObject* object = current->GetObject(); |
| 2048 MarkBit mark_bit = Marking::MarkBitFrom(object); | 2066 MarkBit mark_bit = heap()->marking()->MarkBitFrom(object); |
| 2049 if (mark_bit.Get()) { | 2067 if (mark_bit.Get()) { |
| 2050 mark_bit.Clear(); | 2068 mark_bit.Clear(); |
| 2051 MarkCompactCollector::tracer()->decrement_marked_count(); | 2069 heap()->mark_compact_collector()->tracer()->decrement_marked_count(); |
| 2052 previous = current; | 2070 previous = current; |
| 2053 current = current->next_page(); | 2071 current = current->next_page(); |
| 2054 } else { | 2072 } else { |
| 2055 LargePage* page = current; | 2073 LargePage* page = current; |
| 2056 // Cut the chunk out from the chunk list. | 2074 // Cut the chunk out from the chunk list. |
| 2057 current = current->next_page(); | 2075 current = current->next_page(); |
| 2058 if (previous == NULL) { | 2076 if (previous == NULL) { |
| 2059 first_page_ = current; | 2077 first_page_ = current; |
| 2060 } else { | 2078 } else { |
| 2061 previous->set_next_page(current); | 2079 previous->set_next_page(current); |
| 2062 } | 2080 } |
| 2063 | 2081 |
| 2064 // Free the chunk. | 2082 // Free the chunk. |
| 2065 MarkCompactCollector::ReportDeleteIfNeeded(object); | 2083 heap()->mark_compact_collector()->ReportDeleteIfNeeded(object); |
| 2066 size_ -= static_cast<int>(page->size()); | 2084 size_ -= static_cast<int>(page->size()); |
| 2067 objects_size_ -= object->Size(); | 2085 objects_size_ -= object->Size(); |
| 2068 page_count_--; | 2086 page_count_--; |
| 2069 | 2087 |
| 2070 MemoryAllocator::Free(page); | 2088 heap()->isolate()->memory_allocator()->Free(page); |
| 2071 } | 2089 } |
| 2072 } | 2090 } |
| 2073 } | 2091 } |
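FreeUnmarkedObjects is the classic unlink-while-iterating sweep over a singly linked list: marked pages survive, unmarked ones are spliced out and freed, with 'previous' tracking the last survivor. A standalone rendering of that pattern (PageNode and SweepLargePages are illustrative names):

    // Unlink-while-iterating sweep over a singly linked page list.
    #include <cstdlib>

    struct PageNode { PageNode* next; bool marked; };

    void SweepLargePages(PageNode** first) {
      PageNode* previous = 0;
      PageNode* current = *first;
      while (current != 0) {
        if (current->marked) {
          current->marked = false;  // clear for the next GC cycle
          previous = current;
          current = current->next;
        } else {
          PageNode* dead = current;
          current = current->next;
          if (previous == 0) {
            *first = current;       // head of the list died
          } else {
            previous->next = current;  // splice out mid-list
          }
          free(dead);  // the real code returns the page to the allocator
        }
      }
    }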
| 2074 | 2092 |
| 2075 | 2093 |
| 2076 bool LargeObjectSpace::Contains(HeapObject* object) { | 2094 bool LargeObjectSpace::Contains(HeapObject* object) { |
| 2077 Address address = object->address(); | 2095 Address address = object->address(); |
| 2078 if (Heap::new_space()->Contains(address)) { | 2096 if (heap()->new_space()->Contains(address)) { |
| 2079 return false; | 2097 return false; |
| 2080 } | 2098 } |
| 2081 MemoryChunk* chunk = MemoryChunk::FromAddress(address); | 2099 MemoryChunk* chunk = MemoryChunk::FromAddress(address); |
| 2082 | 2100 |
| 2083 bool owned = chunk->owner() == this; | 2101 bool owned = chunk->owner() == this; |
| 2084 | 2102 |
| 2085 SLOW_ASSERT(!owned | 2103 SLOW_ASSERT(!owned |
| 2086 || !FindObject(address)->IsFailure()); | 2104 || !FindObject(address)->IsFailure()); |
| 2087 | 2105 |
| 2088 return owned; | 2106 return owned; |
| (...skipping 10 matching lines...) | |
| 2099 // Each chunk contains an object that starts at the large object page's | 2117 // Each chunk contains an object that starts at the large object page's |
| 2100 // object area start. | 2118 // object area start. |
| 2101 HeapObject* object = chunk->GetObject(); | 2119 HeapObject* object = chunk->GetObject(); |
| 2102 Page* page = Page::FromAddress(object->address()); | 2120 Page* page = Page::FromAddress(object->address()); |
| 2103 ASSERT(object->address() == page->ObjectAreaStart()); | 2121 ASSERT(object->address() == page->ObjectAreaStart()); |
| 2104 | 2122 |
| 2105 // The first word should be a map, and we expect all map pointers to be | 2123 // The first word should be a map, and we expect all map pointers to be |
| 2106 // in map space. | 2124 // in map space. |
| 2107 Map* map = object->map(); | 2125 Map* map = object->map(); |
| 2108 ASSERT(map->IsMap()); | 2126 ASSERT(map->IsMap()); |
| 2109 ASSERT(Heap::map_space()->Contains(map)); | 2127 ASSERT(heap()->map_space()->Contains(map)); |
| 2110 | 2128 |
| 2111 // We have only code, sequential strings, external strings | 2129 // We have only code, sequential strings, external strings |
| 2112 // (sequential strings that have been morphed into external | 2130 // (sequential strings that have been morphed into external |
| 2113 // strings), fixed arrays, and byte arrays in large object space. | 2131 // strings), fixed arrays, and byte arrays in large object space. |
| 2114 ASSERT(object->IsCode() || object->IsSeqString() || | 2132 ASSERT(object->IsCode() || object->IsSeqString() || |
| 2115 object->IsExternalString() || object->IsFixedArray() || | 2133 object->IsExternalString() || object->IsFixedArray() || |
| 2116 object->IsByteArray()); | 2134 object->IsByteArray()); |
| 2117 | 2135 |
| 2118 // The object itself should look OK. | 2136 // The object itself should look OK. |
| 2119 object->Verify(); | 2137 object->Verify(); |
| 2120 | 2138 |
| 2121 // Byte arrays and strings don't have interior pointers. | 2139 // Byte arrays and strings don't have interior pointers. |
| 2122 if (object->IsCode()) { | 2140 if (object->IsCode()) { |
| 2123 VerifyPointersVisitor code_visitor; | 2141 VerifyPointersVisitor code_visitor; |
| 2124 object->IterateBody(map->instance_type(), | 2142 object->IterateBody(map->instance_type(), |
| 2125 object->Size(), | 2143 object->Size(), |
| 2126 &code_visitor); | 2144 &code_visitor); |
| 2127 } else if (object->IsFixedArray()) { | 2145 } else if (object->IsFixedArray()) { |
| 2128 FixedArray* array = FixedArray::cast(object); | 2146 FixedArray* array = FixedArray::cast(object); |
| 2129 for (int j = 0; j < array->length(); j++) { | 2147 for (int j = 0; j < array->length(); j++) { |
| 2130 Object* element = array->get(j); | 2148 Object* element = array->get(j); |
| 2131 if (element->IsHeapObject()) { | 2149 if (element->IsHeapObject()) { |
| 2132 HeapObject* element_object = HeapObject::cast(element); | 2150 HeapObject* element_object = HeapObject::cast(element); |
| 2133 ASSERT(Heap::Contains(element_object)); | 2151 ASSERT(heap()->Contains(element_object)); |
| 2134 ASSERT(element_object->map()->IsMap()); | 2152 ASSERT(element_object->map()->IsMap()); |
| 2135 } | 2153 } |
| 2136 } | 2154 } |
| 2137 } | 2155 } |
| 2138 } | 2156 } |
| 2139 } | 2157 } |
| 2140 | 2158 |
| 2141 | 2159 |
| 2142 void LargeObjectSpace::Print() { | 2160 void LargeObjectSpace::Print() { |
| 2143 LargeObjectIterator it(this); | 2161 LargeObjectIterator it(this); |
| (...skipping 13 matching lines...) | |
| 2157 CollectHistogramInfo(obj); | 2175 CollectHistogramInfo(obj); |
| 2158 } | 2176 } |
| 2159 | 2177 |
| 2160 PrintF(" number of objects %d, " | 2178 PrintF(" number of objects %d, " |
| 2161 "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_); | 2179 "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_); |
| 2162 if (num_objects > 0) ReportHistogram(false); | 2180 if (num_objects > 0) ReportHistogram(false); |
| 2163 } | 2181 } |
| 2164 | 2182 |
| 2165 | 2183 |
| 2166 void LargeObjectSpace::CollectCodeStatistics() { | 2184 void LargeObjectSpace::CollectCodeStatistics() { |
| 2185 Isolate* isolate = heap()->isolate(); | |
| 2167 LargeObjectIterator obj_it(this); | 2186 LargeObjectIterator obj_it(this); |
| 2168 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { | 2187 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { |
| 2169 if (obj->IsCode()) { | 2188 if (obj->IsCode()) { |
| 2170 Code* code = Code::cast(obj); | 2189 Code* code = Code::cast(obj); |
| 2171 code_kind_statistics[code->kind()] += code->Size(); | 2190 isolate->code_kind_statistics()[code->kind()] += code->Size(); |
| 2172 } | 2191 } |
| 2173 } | 2192 } |
| 2174 } | 2193 } |
| 2175 #endif // DEBUG | 2194 #endif // DEBUG |
| 2176 | 2195 |
| 2177 } } // namespace v8::internal | 2196 } } // namespace v8::internal |