OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 189 matching lines...)
200 if (requested <= allocation_list_[current_allocation_block_index_].size) { | 200 if (requested <= allocation_list_[current_allocation_block_index_].size) { |
201 return; // Found a large enough allocation block. | 201 return; // Found a large enough allocation block. |
202 } | 202 } |
203 } | 203 } |
204 | 204 |
205 // Code range is full or too fragmented. | 205 // Code range is full or too fragmented. |
206 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock"); | 206 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock"); |
207 } | 207 } |
208 | 208 |
209 | 209 |
210 | |
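GetNextAllocationBlock (its first half is elided above) is a first-fit scan: it walks allocation_list_ until it finds a block at least as large as the request, and falls through to FatalProcessOutOfMemory when nothing fits. A minimal standalone sketch of that scan, with a toy FreeBlock in place of V8's types:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct FreeBlock { size_t start; size_t size; };

    // First-fit scan modeled on the visible tail of GetNextAllocationBlock:
    // returns the index of the first block that can hold |requested| bytes,
    // or -1 where V8 would instead call FatalProcessOutOfMemory.
    static int FindBlock(const std::vector<FreeBlock>& blocks,
                         size_t requested) {
      for (size_t i = 0; i < blocks.size(); i++) {
        if (requested <= blocks[i].size) return static_cast<int>(i);
      }
      return -1;  // code range is full or too fragmented
    }

    int main() {
      std::vector<FreeBlock> list = {{0, 4096}, {8192, 65536}};
      assert(FindBlock(list, 10000) == 1);     // skips the 4 KB block
      assert(FindBlock(list, 1 << 20) == -1);  // nothing fits: OOM in V8
      return 0;
    }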
211 Address CodeRange::AllocateRawMemory(const size_t requested, | 210 Address CodeRange::AllocateRawMemory(const size_t requested, |
212 size_t* allocated) { | 211 size_t* allocated, |
212 size_t commit_body_size) { | |
213 ASSERT(commit_body_size <= requested); | |
213 ASSERT(current_allocation_block_index_ < allocation_list_.length()); | 214 ASSERT(current_allocation_block_index_ < allocation_list_.length()); |
214 if (requested > allocation_list_[current_allocation_block_index_].size) { | 215 if (requested > allocation_list_[current_allocation_block_index_].size) { |
215 // Find an allocation block large enough. This function call may | 216 // Find an allocation block large enough. This function call may |
216 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block. | 217 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block. |
217 GetNextAllocationBlock(requested); | 218 GetNextAllocationBlock(requested); |
218 } | 219 } |
219 // Commit the requested memory at the start of the current allocation block. | 220 // Commit the requested memory at the start of the current allocation block. |
220 size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment); | 221 size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment); |
221 FreeBlock current = allocation_list_[current_allocation_block_index_]; | 222 FreeBlock current = allocation_list_[current_allocation_block_index_]; |
222 if (aligned_requested >= (current.size - Page::kPageSize)) { | 223 if (aligned_requested >= (current.size - Page::kPageSize)) { |
223 // Don't leave a small free block, useless for a large object or chunk. | 224 // Don't leave a small free block, useless for a large object or chunk. |
224 *allocated = current.size; | 225 *allocated = current.size; |
225 } else { | 226 } else { |
226 *allocated = aligned_requested; | 227 *allocated = aligned_requested; |
227 } | 228 } |
228 ASSERT(*allocated <= current.size); | 229 ASSERT(*allocated <= current.size); |
229 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment)); | 230 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment)); |
230 if (!MemoryAllocator::CommitCodePage(code_range_, | 231 if (!MemoryAllocator::CommitCodePage(code_range_, |
231 current.start, | 232 current.start, |
232 *allocated)) { | 233 *allocated, |
234 commit_body_size)) { | |
233 *allocated = 0; | 235 *allocated = 0; |
234 return NULL; | 236 return NULL; |
235 } | 237 } |
236 allocation_list_[current_allocation_block_index_].start += *allocated; | 238 allocation_list_[current_allocation_block_index_].start += *allocated; |
237 allocation_list_[current_allocation_block_index_].size -= *allocated; | 239 allocation_list_[current_allocation_block_index_].size -= *allocated; |
238 if (*allocated == current.size) { | 240 if (*allocated == current.size) { |
239 GetNextAllocationBlock(0); // This block is used up, get the next one. | 241 GetNextAllocationBlock(0); // This block is used up, get the next one. |
240 } | 242 } |
241 return current.start; | 243 return current.start; |
242 } | 244 } |
243 | 245 |
244 | 246 |
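The tail-avoidance branch above ("Don't leave a small free block") is easiest to follow with concrete numbers. A self-contained sketch of that rounding decision, using illustrative power-of-two constants rather than V8's real MemoryChunk::kAlignment and Page::kPageSize:

    #include <cstddef>
    #include <cstdio>

    static const size_t kAlignment = 4096;  // stand-in for MemoryChunk::kAlignment
    static const size_t kPageSize = 4096;   // stand-in for Page::kPageSize

    // Round x up to a multiple of m (m must be a power of two).
    static size_t RoundUp(size_t x, size_t m) { return (x + m - 1) & ~(m - 1); }

    // How much of a free block of |block_size| bytes an allocation of
    // |requested| bytes should consume, mirroring AllocateRawMemory.
    static size_t BytesToTake(size_t requested, size_t block_size) {
      size_t aligned = RoundUp(requested, kAlignment);
      // A leftover smaller than a page is useless for a future large
      // object or chunk, so swallow the whole block instead.
      if (aligned >= block_size - kPageSize) return block_size;
      return aligned;
    }

    int main() {
      printf("%zu\n", BytesToTake(60 * 1024, 64 * 1024));  // 65536: whole block
      printf("%zu\n", BytesToTake(16 * 1024, 64 * 1024));  // 16384: leaves a tail
      return 0;
    }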
247 bool CodeRange::CommitRawMemory(Address start, size_t length) { | |
248 // Commit page body (executable). | |
249 if (!code_range_->Commit(start, length, true)) return false; | |
danno (2013/01/17 10:49:55): how about just:
    return code_range_->Commit(start, …
haitao.feng (2013/01/17 14:20:33): Done.
250 return true; | |
251 } | |
252 | |
253 | |
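Given the "Done." above, the landed version of CommitRawMemory presumably collapses the body into a single return along these lines (a sketch of the suggested shape, not the verified final source):

    bool CodeRange::CommitRawMemory(Address start, size_t length) {
      // Commit the page body (executable) and propagate Commit's result.
      return code_range_->Commit(start, length, true);
    }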
245 void CodeRange::FreeRawMemory(Address address, size_t length) { | 254 void CodeRange::FreeRawMemory(Address address, size_t length) { |
246 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment)); | 255 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment)); |
247 free_list_.Add(FreeBlock(address, length)); | 256 free_list_.Add(FreeBlock(address, length)); |
248 code_range_->Uncommit(address, length); | 257 code_range_->Uncommit(address, length); |
249 } | 258 } |
250 | 259 |
251 | 260 |
252 void CodeRange::TearDown() { | 261 void CodeRange::TearDown() { |
253 delete code_range_; // Frees all memory in the virtual memory range. | 262 delete code_range_; // Frees all memory in the virtual memory range. |
254 code_range_ = NULL; | 263 code_range_ = NULL; |
(...skipping 84 matching lines...)
339 } | 348 } |
340 | 349 |
341 | 350 |
342 Address MemoryAllocator::ReserveAlignedMemory(size_t size, | 351 Address MemoryAllocator::ReserveAlignedMemory(size_t size, |
343 size_t alignment, | 352 size_t alignment, |
344 VirtualMemory* controller) { | 353 VirtualMemory* controller) { |
345 VirtualMemory reservation(size, alignment); | 354 VirtualMemory reservation(size, alignment); |
346 | 355 |
347 if (!reservation.IsReserved()) return NULL; | 356 if (!reservation.IsReserved()) return NULL; |
348 size_ += reservation.size(); | 357 size_ += reservation.size(); |
349 Address base = RoundUp(static_cast<Address>(reservation.address()), | 358 Address base = static_cast<Address>(reservation.address()); |
350 alignment); | |
351 controller->TakeControl(&reservation); | 359 controller->TakeControl(&reservation); |
352 return base; | 360 return base; |
353 } | 361 } |
354 | 362 |
355 | 363 |
356 Address MemoryAllocator::AllocateAlignedMemory(size_t size, | 364 Address MemoryAllocator::AllocateAlignedMemory(size_t size, |
danno (2013/01/17 10:49:55): nit: "requested_size" is a better name
haitao.feng (2013/01/17 14:20:33): Done.
357 size_t alignment, | 365 size_t alignment, |
366 size_t commit_body_size, | |
danno (2013/01/17 10:49:55):
    nit: "commit_size" is a better name
    nit: could you …
haitao.feng (2013/01/17 14:20:33): Done.
haitao.feng (2013/01/17 14:20:33): Done.
358 Executability executable, | 367 Executability executable, |
359 VirtualMemory* controller) { | 368 VirtualMemory* controller) { |
369 ASSERT(commit_body_size <= size); | |
360 VirtualMemory reservation; | 370 VirtualMemory reservation; |
361 Address base = ReserveAlignedMemory(size, alignment, &reservation); | 371 Address base = ReserveAlignedMemory(size, alignment, &reservation); |
362 if (base == NULL) return NULL; | 372 if (base == NULL) return NULL; |
363 | 373 |
364 if (executable == EXECUTABLE) { | 374 if (executable == EXECUTABLE) { |
365 if (!CommitCodePage(&reservation, base, size)) { | 375 if (!CommitCodePage(&reservation, base, size, commit_body_size)) { |
366 base = NULL; | 376 base = NULL; |
367 } | 377 } |
368 } else { | 378 } else { |
369 if (!reservation.Commit(base, size, false)) { | 379 size_t commit_size = MemoryChunk::kObjectStartOffset + commit_body_size; |
380 if (!reservation.Commit(base, commit_size, false)) { | |
370 base = NULL; | 381 base = NULL; |
371 } | 382 } |
372 } | 383 } |
373 | 384 |
374 if (base == NULL) { | 385 if (base == NULL) { |
375 // Failed to commit the body. Release the mapping and any partially | 386 // Failed to commit the body. Release the mapping and any partially |
376 // committed regions inside it. | 387 // committed regions inside it. |
377 reservation.Release(); | 388 reservation.Release(); |
378 return NULL; | 389 return NULL; |
379 } | 390 } |
(...skipping 83 matching lines...)
463 } | 474 } |
464 | 475 |
465 if (owner == heap->old_data_space()) { | 476 if (owner == heap->old_data_space()) { |
466 chunk->SetFlag(CONTAINS_ONLY_DATA); | 477 chunk->SetFlag(CONTAINS_ONLY_DATA); |
467 } | 478 } |
468 | 479 |
469 return chunk; | 480 return chunk; |
470 } | 481 } |
471 | 482 |
472 | 483 |
484 bool MemoryChunk::CommitBody(size_t body_size, Executability executable) { | |
485 // Already committed; we do not uncommit here. |
486 if (body_size <= (area_end_ - area_start_)) return true; | |
danno (2013/01/17 10:49:55): The problem with using area_end is that you change …
haitao.feng (2013/01/17 14:20:33): How about "ASSERT(body_size <= static_cast<size_t> …
487 | |
488 size_t length = body_size - (area_end_ - area_start_); | |
489 if (reservation_.IsReserved()) { | |
490 if (!reservation_.Commit(area_end_, length, executable == EXECUTABLE)) { | |
491 return false; | |
492 } | |
493 } else { | |
494 CodeRange* code_range = heap_->isolate()->code_range(); | |
495 ASSERT(code_range->exists() && (executable == EXECUTABLE)); | |
496 if (!code_range->CommitRawMemory(area_end_, length)) return false; | |
497 } | |
498 | |
499 if (Heap::ShouldZapGarbage()) { | |
500 heap_->isolate()->memory_allocator()->ZapBlock(area_end_, length); | |
501 } | |
502 | |
503 area_end_ = area_start_ + body_size; | |
504 | |
505 return true; | |
506 } | |
507 | |
508 | |
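CommitBody is the heart of the lazy-commit scheme: a chunk reserves its full body up front but only extends the committed prefix when a caller asks for more. A self-contained model of that growth logic, with a stubbed-out Commit standing in for VirtualMemory/CodeRange (illustrative only, not V8's API):

    #include <cassert>
    #include <cstddef>

    // Toy chunk that reserves |reserved| body bytes but commits lazily.
    struct ToyChunk {
      size_t reserved;   // total body bytes reserved up front
      size_t committed;  // bytes committed so far (the usable prefix)

      // Stand-in for VirtualMemory::Commit; a real version asks the OS.
      bool Commit(size_t /*offset*/, size_t /*length*/) { return true; }

      // Mirrors MemoryChunk::CommitBody: grow the committed prefix on demand.
      bool CommitBody(size_t body_size) {
        assert(body_size <= reserved);            // cannot exceed reservation
        if (body_size <= committed) return true;  // already committed
        size_t delta = body_size - committed;
        if (!Commit(committed, delta)) return false;
        committed = body_size;
        return true;
      }
    };

    int main() {
      ToyChunk chunk = {1024 * 1024, 64 * 1024};
      chunk.CommitBody(32 * 1024);   // no-op: already within the prefix
      chunk.CommitBody(256 * 1024);  // commits the additional 192 KB
      assert(chunk.committed == 256 * 1024);
      return 0;
    }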
473 void MemoryChunk::InsertAfter(MemoryChunk* other) { | 509 void MemoryChunk::InsertAfter(MemoryChunk* other) { |
474 next_chunk_ = other->next_chunk_; | 510 next_chunk_ = other->next_chunk_; |
475 prev_chunk_ = other; | 511 prev_chunk_ = other; |
476 other->next_chunk_->prev_chunk_ = this; | 512 other->next_chunk_->prev_chunk_ = this; |
477 other->next_chunk_ = this; | 513 other->next_chunk_ = this; |
478 } | 514 } |
479 | 515 |
480 | 516 |
481 void MemoryChunk::Unlink() { | 517 void MemoryChunk::Unlink() { |
482 if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) { | 518 if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) { |
483 heap_->decrement_scan_on_scavenge_pages(); | 519 heap_->decrement_scan_on_scavenge_pages(); |
484 ClearFlag(SCAN_ON_SCAVENGE); | 520 ClearFlag(SCAN_ON_SCAVENGE); |
485 } | 521 } |
486 next_chunk_->prev_chunk_ = prev_chunk_; | 522 next_chunk_->prev_chunk_ = prev_chunk_; |
487 prev_chunk_->next_chunk_ = next_chunk_; | 523 prev_chunk_->next_chunk_ = next_chunk_; |
488 prev_chunk_ = NULL; | 524 prev_chunk_ = NULL; |
489 next_chunk_ = NULL; | 525 next_chunk_ = NULL; |
490 } | 526 } |
491 | 527 |
492 | 528 |
493 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, | 529 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_body_size, |
530 intptr_t commit_body_size, | |
494 Executability executable, | 531 Executability executable, |
495 Space* owner) { | 532 Space* owner) { |
533 ASSERT(commit_body_size <= reserve_body_size); | |
534 | |
496 size_t chunk_size; | 535 size_t chunk_size; |
497 Heap* heap = isolate_->heap(); | 536 Heap* heap = isolate_->heap(); |
498 Address base = NULL; | 537 Address base = NULL; |
499 VirtualMemory reservation; | 538 VirtualMemory reservation; |
500 Address area_start = NULL; | 539 Address area_start = NULL; |
501 Address area_end = NULL; | 540 Address area_end = NULL; |
502 | 541 |
503 if (executable == EXECUTABLE) { | 542 if (executable == EXECUTABLE) { |
504 chunk_size = RoundUp(CodePageAreaStartOffset() + body_size, | 543 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_body_size, |
505 OS::CommitPageSize()) + CodePageGuardSize(); | 544 OS::CommitPageSize()) + CodePageGuardSize(); |
506 | 545 |
507 // Check executable memory limit. | 546 // Check executable memory limit. |
508 if (size_executable_ + chunk_size > capacity_executable_) { | 547 if (size_executable_ + chunk_size > capacity_executable_) { |
509 LOG(isolate_, | 548 LOG(isolate_, |
510 StringEvent("MemoryAllocator::AllocateRawMemory", | 549 StringEvent("MemoryAllocator::AllocateRawMemory", |
511 "V8 Executable Allocation capacity exceeded")); | 550 "V8 Executable Allocation capacity exceeded")); |
512 return NULL; | 551 return NULL; |
513 } | 552 } |
514 | 553 |
515 // Allocate executable memory either from code range or from the | 554 // Allocate executable memory either from code range or from the |
516 // OS. | 555 // OS. |
517 if (isolate_->code_range()->exists()) { | 556 if (isolate_->code_range()->exists()) { |
518 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); | 557 base = isolate_->code_range()->AllocateRawMemory(chunk_size, |
558 &chunk_size, | |
559 commit_body_size); | |
519 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), | 560 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), |
520 MemoryChunk::kAlignment)); | 561 MemoryChunk::kAlignment)); |
521 if (base == NULL) return NULL; | 562 if (base == NULL) return NULL; |
522 size_ += chunk_size; | 563 size_ += chunk_size; |
523 // Update executable memory size. | 564 // Update executable memory size. |
524 size_executable_ += chunk_size; | 565 size_executable_ += chunk_size; |
525 } else { | 566 } else { |
526 base = AllocateAlignedMemory(chunk_size, | 567 base = AllocateAlignedMemory(chunk_size, |
527 MemoryChunk::kAlignment, | 568 MemoryChunk::kAlignment, |
569 commit_body_size, | |
528 executable, | 570 executable, |
529 &reservation); | 571 &reservation); |
530 if (base == NULL) return NULL; | 572 if (base == NULL) return NULL; |
531 // Update executable memory size. | 573 // Update executable memory size. |
532 size_executable_ += reservation.size(); | 574 size_executable_ += reservation.size(); |
533 } | 575 } |
534 | 576 |
535 if (Heap::ShouldZapGarbage()) { | 577 if (Heap::ShouldZapGarbage()) { |
536 ZapBlock(base, CodePageGuardStartOffset()); | 578 ZapBlock(base, CodePageGuardStartOffset()); |
537 ZapBlock(base + CodePageAreaStartOffset(), body_size); | 579 ZapBlock(base + CodePageAreaStartOffset(), commit_body_size); |
538 } | 580 } |
539 | 581 |
540 area_start = base + CodePageAreaStartOffset(); | 582 area_start = base + CodePageAreaStartOffset(); |
541 area_end = area_start + body_size; | 583 area_end = area_start + commit_body_size; |
danno (2013/01/17 10:49:55): area_end should always be the end of the reserved …
haitao.feng (2013/01/17 14:20:33): The end could be calculated by address() + size_.
542 } else { | 584 } else { |
543 chunk_size = MemoryChunk::kObjectStartOffset + body_size; | 585 chunk_size = MemoryChunk::kObjectStartOffset + reserve_body_size; |
544 base = AllocateAlignedMemory(chunk_size, | 586 base = AllocateAlignedMemory(chunk_size, |
545 MemoryChunk::kAlignment, | 587 MemoryChunk::kAlignment, |
588 commit_body_size, | |
546 executable, | 589 executable, |
547 &reservation); | 590 &reservation); |
548 | 591 |
549 if (base == NULL) return NULL; | 592 if (base == NULL) return NULL; |
550 | 593 |
551 if (Heap::ShouldZapGarbage()) { | 594 if (Heap::ShouldZapGarbage()) { |
552 ZapBlock(base, chunk_size); | 595 ZapBlock(base, MemoryChunk::kObjectStartOffset + commit_body_size); |
553 } | 596 } |
554 | 597 |
555 area_start = base + Page::kObjectStartOffset; | 598 area_start = base + Page::kObjectStartOffset; |
556 area_end = base + chunk_size; | 599 area_end = area_start + commit_body_size; |
557 } | 600 } |
558 | 601 |
602 // Use chunk_size for statistics and callbacks, as the reserved |
603 // but uncommitted memory regions are only meaningful for this |
604 // allocated memory chunk. |
danno (2013/01/17 10:49:55): nit: I don't know what you mean by "allocated memo …
haitao.feng (2013/01/17 14:20:33): Done.
559 isolate_->counters()->memory_allocated()-> | 605 isolate_->counters()->memory_allocated()-> |
560 Increment(static_cast<int>(chunk_size)); | 606 Increment(static_cast<int>(chunk_size)); |
561 | 607 |
562 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); | 608 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); |
563 if (owner != NULL) { | 609 if (owner != NULL) { |
564 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); | 610 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); |
565 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); | 611 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); |
566 } | 612 } |
567 | 613 |
568 MemoryChunk* result = MemoryChunk::Initialize(heap, | 614 MemoryChunk* result = MemoryChunk::Initialize(heap, |
569 base, | 615 base, |
570 chunk_size, | 616 chunk_size, |
571 area_start, | 617 area_start, |
572 area_end, | 618 area_end, |
573 executable, | 619 executable, |
574 owner); | 620 owner); |
575 result->set_reserved_memory(&reservation); | 621 result->set_reserved_memory(&reservation); |
576 return result; | 622 return result; |
577 } | 623 } |
578 | 624 |
579 | 625 |
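The reserve_body_size/commit_body_size split is the core of this change: the chunk's address-space footprint is computed from the former, while only the latter is backed by committed pages at allocation time. A worked sketch of the executable-chunk sizing, with assumed offsets standing in for CodePageAreaStartOffset() and CodePageGuardSize():

    #include <cstddef>
    #include <cstdio>

    static size_t RoundUp(size_t x, size_t m) { return (x + m - 1) & ~(m - 1); }

    int main() {
      // Assumed values, for illustration only; V8 derives these from the
      // OS commit page size and the MemoryChunk header layout.
      const size_t kCommitPageSize = 4096;
      const size_t kAreaStartOffset = 8192;   // header plus leading guard
      const size_t kGuardSize = 4096;         // trailing guard page

      size_t reserve_body_size = 512 * 1024;  // full body, address space only
      size_t commit_body_size = 64 * 1024;    // prefix committed right away

      // The reservation is sized from the full body...
      size_t chunk_size =
          RoundUp(kAreaStartOffset + reserve_body_size, kCommitPageSize) +
          kGuardSize;
      // ...but only the header, guards and committed prefix get real pages.
      size_t committed_now = kAreaStartOffset + commit_body_size + kGuardSize;

      printf("reserved: %zu, committed: %zu\n", chunk_size, committed_now);
      return 0;
    }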
580 Page* MemoryAllocator::AllocatePage(intptr_t size, | 626 Page* MemoryAllocator::AllocatePage(intptr_t size, |
581 PagedSpace* owner, | 627 PagedSpace* owner, |
582 Executability executable) { | 628 Executability executable) { |
583 MemoryChunk* chunk = AllocateChunk(size, executable, owner); | 629 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner); |
584 | 630 |
585 if (chunk == NULL) return NULL; | 631 if (chunk == NULL) return NULL; |
586 | 632 |
587 return Page::Initialize(isolate_->heap(), chunk, executable, owner); | 633 return Page::Initialize(isolate_->heap(), chunk, executable, owner); |
588 } | 634 } |
589 | 635 |
590 | 636 |
591 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, | 637 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, |
592 Space* owner, | 638 Space* owner, |
593 Executability executable) { | 639 Executability executable) { |
594 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner); | 640 MemoryChunk* chunk = AllocateChunk(object_size, |
641 object_size, | |
642 executable, | |
643 owner); | |
595 if (chunk == NULL) return NULL; | 644 if (chunk == NULL) return NULL; |
596 return LargePage::Initialize(isolate_->heap(), chunk); | 645 return LargePage::Initialize(isolate_->heap(), chunk); |
597 } | 646 } |
598 | 647 |
599 | 648 |
600 void MemoryAllocator::Free(MemoryChunk* chunk) { | 649 void MemoryAllocator::Free(MemoryChunk* chunk) { |
601 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); | 650 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); |
602 if (chunk->owner() != NULL) { | 651 if (chunk->owner() != NULL) { |
603 ObjectSpace space = | 652 ObjectSpace space = |
604 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); | 653 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); |
(...skipping 123 matching lines...)
728 | 777 |
729 int MemoryAllocator::CodePageAreaEndOffset() { | 778 int MemoryAllocator::CodePageAreaEndOffset() { |
730 // We are guarding code pages: the last OS page will be protected as | 779 // We are guarding code pages: the last OS page will be protected as |
731 // non-writable. | 780 // non-writable. |
732 return Page::kPageSize - static_cast<int>(OS::CommitPageSize()); | 781 return Page::kPageSize - static_cast<int>(OS::CommitPageSize()); |
733 } | 782 } |
734 | 783 |
735 | 784 |
736 bool MemoryAllocator::CommitCodePage(VirtualMemory* vm, | 785 bool MemoryAllocator::CommitCodePage(VirtualMemory* vm, |
737 Address start, | 786 Address start, |
738 size_t size) { | 787 size_t size, |
danno (2013/01/17 10:49:55): I think it's clearer if you swap the order of the …
haitao.feng (2013/01/17 14:20:33): Done.
788 size_t body_size) { | |
739 // Commit page header (not executable). | 789 // Commit page header (not executable). |
740 if (!vm->Commit(start, | 790 if (!vm->Commit(start, |
741 CodePageGuardStartOffset(), | 791 CodePageGuardStartOffset(), |
742 false)) { | 792 false)) { |
743 return false; | 793 return false; |
744 } | 794 } |
745 | 795 |
746 // Create guard page after the header. | 796 // Create guard page after the header. |
747 if (!vm->Guard(start + CodePageGuardStartOffset())) { | 797 if (!vm->Guard(start + CodePageGuardStartOffset())) { |
748 return false; | 798 return false; |
749 } | 799 } |
750 | 800 |
751 // Commit page body (executable). | 801 // Commit page body (executable). |
752 size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize(); | |
753 if (!vm->Commit(start + CodePageAreaStartOffset(), | 802 if (!vm->Commit(start + CodePageAreaStartOffset(), |
754 area_size, | 803 body_size, |
755 true)) { | 804 true)) { |
756 return false; | 805 return false; |
757 } | 806 } |
758 | 807 |
759 // Create guard page after the allocatable area. | 808 // Create guard page before the end. |
760 if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) { | 809 if (!vm->Guard(start + size - CodePageGuardSize())) { |
761 return false; | 810 return false; |
762 } | 811 } |
763 | 812 |
764 return true; | 813 return true; |
765 } | 814 } |
766 | 815 |
767 | 816 |
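With the trailing guard now placed at start + size - CodePageGuardSize(), its position depends only on the reserved size, so a partially committed code page keeps its guard at the very end of the reservation. The resulting layout, sketched with symbolic offsets (not to scale):

    // start                                                    start + size
    //   [ header (RW) ][ guard ][ body (RX): committed up to body_size,
    //                            remainder reserved only      ][ guard ]
    //
    // CodePageGuardStartOffset()         -> end of the writable header
    // CodePageAreaStartOffset()          -> first body byte
    // start + size - CodePageGuardSize() -> trailing guard page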
768 // ----------------------------------------------------------------------------- | 817 // ----------------------------------------------------------------------------- |
769 // MemoryChunk implementation | 818 // MemoryChunk implementation |
770 | 819 |
(...skipping 2215 matching lines...)
2986 object->ShortPrint(); | 3035 object->ShortPrint(); |
2987 PrintF("\n"); | 3036 PrintF("\n"); |
2988 } | 3037 } |
2989 printf(" --------------------------------------\n"); | 3038 printf(" --------------------------------------\n"); |
2990 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3039 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
2991 } | 3040 } |
2992 | 3041 |
2993 #endif // DEBUG | 3042 #endif // DEBUG |
2994 | 3043 |
2995 } } // namespace v8::internal | 3044 } } // namespace v8::internal |