OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 252 matching lines...)
263 delete code_range_; // Frees all memory in the virtual memory range. | 263 delete code_range_; // Frees all memory in the virtual memory range. |
264 code_range_ = NULL; | 264 code_range_ = NULL; |
265 free_list_.Free(); | 265 free_list_.Free(); |
266 allocation_list_.Free(); | 266 allocation_list_.Free(); |
267 } | 267 } |
268 | 268 |
269 | 269 |
270 // ----------------------------------------------------------------------------- | 270 // ----------------------------------------------------------------------------- |
271 // MemoryAllocator | 271 // MemoryAllocator |
272 // | 272 // |
273 int MemoryAllocator::capacity_ = 0; | 273 intptr_t MemoryAllocator::capacity_ = 0; |
274 int MemoryAllocator::size_ = 0; | 274 intptr_t MemoryAllocator::size_ = 0; |
275 int MemoryAllocator::size_executable_ = 0; | 275 intptr_t MemoryAllocator::size_executable_ = 0; |
276 | 276 |
277 List<MemoryAllocator::MemoryAllocationCallbackRegistration> | 277 List<MemoryAllocator::MemoryAllocationCallbackRegistration> |
278 MemoryAllocator::memory_allocation_callbacks_; | 278 MemoryAllocator::memory_allocation_callbacks_; |
279 | 279 |
280 VirtualMemory* MemoryAllocator::initial_chunk_ = NULL; | 280 VirtualMemory* MemoryAllocator::initial_chunk_ = NULL; |
281 | 281 |
282 // 270 is an estimate based on the static default heap size of a pair of 256K | 282 // 270 is an estimate based on the static default heap size of a pair of 256K |
283 // semispaces and a 64M old generation. | 283 // semispaces and a 64M old generation. |
284 const int kEstimatedNumberOfChunks = 270; | 284 const int kEstimatedNumberOfChunks = 270; |
285 List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_( | 285 List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_( |
286 kEstimatedNumberOfChunks); | 286 kEstimatedNumberOfChunks); |
287 List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks); | 287 List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks); |
288 int MemoryAllocator::max_nof_chunks_ = 0; | 288 int MemoryAllocator::max_nof_chunks_ = 0; |
289 int MemoryAllocator::top_ = 0; | 289 int MemoryAllocator::top_ = 0; |
290 | 290 |
291 | 291 |
292 void MemoryAllocator::Push(int free_chunk_id) { | 292 void MemoryAllocator::Push(int free_chunk_id) { |
293 ASSERT(max_nof_chunks_ > 0); | 293 ASSERT(max_nof_chunks_ > 0); |
294 ASSERT(top_ < max_nof_chunks_); | 294 ASSERT(top_ < max_nof_chunks_); |
295 free_chunk_ids_[top_++] = free_chunk_id; | 295 free_chunk_ids_[top_++] = free_chunk_id; |
296 } | 296 } |
297 | 297 |
298 | 298 |
299 int MemoryAllocator::Pop() { | 299 int MemoryAllocator::Pop() { |
300 ASSERT(top_ > 0); | 300 ASSERT(top_ > 0); |
301 return free_chunk_ids_[--top_]; | 301 return free_chunk_ids_[--top_]; |
302 } | 302 } |
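
Push() and Pop() above keep a stack of free chunk ids in free_chunk_ids_, with top_ as the stack pointer; Setup() further down seeds it with every id. A minimal standalone sketch of the same idea, using a plain array and illustrative names rather than V8's List type:

// Sketch of the free-chunk-id stack maintained by Push()/Pop().
// The names and the fixed capacity are illustrative, not V8's actual types.
#include <cassert>

const int kMaxChunks = 8;        // assumed capacity for this sketch
static int free_ids[kMaxChunks];
static int top = 0;              // number of ids currently on the stack

void PushId(int id) {
  assert(top < kMaxChunks);      // mirrors ASSERT(top_ < max_nof_chunks_)
  free_ids[top++] = id;
}

int PopId() {
  assert(top > 0);               // mirrors ASSERT(top_ > 0)
  return free_ids[--top];
}

int main() {
  for (int i = kMaxChunks - 1; i >= 0; i--) PushId(i);  // seeded like Setup()
  int id = PopId();              // lowest id comes back first
  PushId(id);                    // releasing a chunk frees its id again
  return 0;
}
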
303 | 303 |
304 | 304 |
305 bool MemoryAllocator::Setup(int capacity) { | 305 bool MemoryAllocator::Setup(intptr_t capacity) { |
306 capacity_ = RoundUp(capacity, Page::kPageSize); | 306 capacity_ = RoundUp(capacity, Page::kPageSize); |
307 | 307 |
308 // Over-estimate the size of chunks_ array. It assumes the expansion of old | 308 // Over-estimate the size of chunks_ array. It assumes the expansion of old |
309 // space is always in the unit of a chunk (kChunkSize) except the last | 309 // space is always in the unit of a chunk (kChunkSize) except the last |
310 // expansion. | 310 // expansion. |
311 // | 311 // |
312 // Due to alignment, allocated space might be one page less than required | 312 // Due to alignment, allocated space might be one page less than required |
313 // number (kPagesPerChunk) of pages for old spaces. | 313 // number (kPagesPerChunk) of pages for old spaces. |
314 // | 314 // |
315 // Reserve two chunk ids for semispaces, one for map space, one for old | 315 // Reserve two chunk ids for semispaces, one for map space, one for old |
316 // space, and one for code space. | 316 // space, and one for code space. |
317 max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5; | 317 max_nof_chunks_ = |
| 318 static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5; |
318 if (max_nof_chunks_ > kMaxNofChunks) return false; | 319 if (max_nof_chunks_ > kMaxNofChunks) return false; |
319 | 320 |
320 size_ = 0; | 321 size_ = 0; |
321 size_executable_ = 0; | 322 size_executable_ = 0; |
322 ChunkInfo info; // uninitialized element. | 323 ChunkInfo info; // uninitialized element. |
323 for (int i = max_nof_chunks_ - 1; i >= 0; i--) { | 324 for (int i = max_nof_chunks_ - 1; i >= 0; i--) { |
324 chunks_.Add(info); | 325 chunks_.Add(info); |
325 free_chunk_ids_.Add(i); | 326 free_chunk_ids_.Add(i); |
326 } | 327 } |
327 top_ = max_nof_chunks_; | 328 top_ = max_nof_chunks_; |
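
The visible part of Setup() derives max_nof_chunks_ from the requested capacity by assuming each old-space expansion yields one page less than a full chunk of usable space, then reserving five extra ids (two semispaces, map, old and code space). A worked example of that arithmetic with assumed constants; V8's real kChunkSize and kPageSize are defined elsewhere and may differ:

// Worked example of the chunk-count over-estimate in Setup().
// kPageSize and kPagesPerChunk below are assumptions for illustration only.
#include <cstdint>
#include <cstdio>

const intptr_t kPageSize = 8 * 1024;                  // assumed
const int kPagesPerChunk = 16;                        // assumed
const intptr_t kChunkSize = kPagesPerChunk * kPageSize;

int main() {
  intptr_t capacity = static_cast<intptr_t>(64) * 1024 * 1024;  // e.g. 64M
  // Each chunk may contribute one page less than kPagesPerChunk of usable
  // space, so divide by (kChunkSize - kPageSize); +5 reserves ids for the
  // two semispaces, map space, old space and code space.
  int max_nof_chunks =
      static_cast<int>(capacity / (kChunkSize - kPageSize)) + 5;
  std::printf("max_nof_chunks = %d\n", max_nof_chunks);  // 551 with these values
  return 0;
}
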
(...skipping 356 matching lines...)
684 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize); | 685 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize); |
685 ASSERT(chunk_start <= p->address() && p->address() < high); | 686 ASSERT(chunk_start <= p->address() && p->address() < high); |
686 | 687 |
687 return Page::FromAddress(high - Page::kPageSize); | 688 return Page::FromAddress(high - Page::kPageSize); |
688 } | 689 } |
689 | 690 |
690 | 691 |
691 #ifdef DEBUG | 692 #ifdef DEBUG |
692 void MemoryAllocator::ReportStatistics() { | 693 void MemoryAllocator::ReportStatistics() { |
693 float pct = static_cast<float>(capacity_ - size_) / capacity_; | 694 float pct = static_cast<float>(capacity_ - size_) / capacity_; |
694 PrintF(" capacity: %d, used: %d, available: %%%d\n\n", | 695 PrintF(" capacity: %" V8_PTR_PREFIX "d" |
| 696 ", used: %" V8_PTR_PREFIX "d" |
| 697 ", available: %%%d\n\n", |
695 capacity_, size_, static_cast<int>(pct*100)); | 698 capacity_, size_, static_cast<int>(pct*100)); |
696 } | 699 } |
697 #endif | 700 #endif |
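
The ReportStatistics() hunk switches the format string to the V8_PTR_PREFIX length-modifier macro so the now-intptr_t counters print correctly on both 32-bit and 64-bit builds. A standalone sketch of the same portability trick, using the standard PRIdPTR macro as a stand-in (an assumption; the real V8_PTR_PREFIX is defined in V8's headers, not in this hunk):

// Sketch: printing intptr_t portably, the problem V8_PTR_PREFIX solves.
#include <cinttypes>
#include <cstdio>

int main() {
  intptr_t capacity = static_cast<intptr_t>(64) * 1024 * 1024;
  intptr_t used = 12345678;
  float pct = static_cast<float>(capacity - used) / capacity;
  // PRIdPTR expands to the right length modifier for intptr_t on this target.
  std::printf("  capacity: %" PRIdPTR ", used: %" PRIdPTR ", available: %%%d\n",
              capacity, used, static_cast<int>(pct * 100));
  return 0;
}
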
698 | 701 |
699 | 702 |
700 void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space, | 703 void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space, |
701 Page** first_page, | 704 Page** first_page, |
702 Page** last_page, | 705 Page** last_page, |
703 Page** last_page_in_use) { | 706 Page** last_page_in_use) { |
704 Page* first = NULL; | 707 Page* first = NULL; |
(...skipping 57 matching lines...)
762 } | 765 } |
763 | 766 |
764 return last_page; | 767 return last_page; |
765 } | 768 } |
766 | 769 |
767 | 770 |
768 | 771 |
769 // ----------------------------------------------------------------------------- | 772 // ----------------------------------------------------------------------------- |
770 // PagedSpace implementation | 773 // PagedSpace implementation |
771 | 774 |
772 PagedSpace::PagedSpace(int max_capacity, | 775 PagedSpace::PagedSpace(intptr_t max_capacity, |
773 AllocationSpace id, | 776 AllocationSpace id, |
774 Executability executable) | 777 Executability executable) |
775 : Space(id, executable) { | 778 : Space(id, executable) { |
776 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) | 779 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) |
777 * Page::kObjectAreaSize; | 780 * Page::kObjectAreaSize; |
778 accounting_stats_.Clear(); | 781 accounting_stats_.Clear(); |
779 | 782 |
780 allocation_info_.top = NULL; | 783 allocation_info_.top = NULL; |
781 allocation_info_.limit = NULL; | 784 allocation_info_.limit = NULL; |
782 | 785 |
783 mc_forwarding_info_.top = NULL; | 786 mc_forwarding_info_.top = NULL; |
784 mc_forwarding_info_.limit = NULL; | 787 mc_forwarding_info_.limit = NULL; |
785 } | 788 } |
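
The constructor rounds the requested byte capacity down to whole pages and re-expresses it as usable object-area bytes; once capacities pass 2 GB on 64-bit targets that product no longer fits in an int, which is why the parameter and max_capacity_ widen to intptr_t. A small sketch of the same conversion, assuming a 64-bit build and made-up page constants:

// Sketch of the capacity normalisation done in the PagedSpace constructor.
// kPageSize and kObjectAreaSize are assumed values for illustration.
#include <cstdint>
#include <cstdio>

const intptr_t kPageSize = 8 * 1024;               // assumed
const intptr_t kObjectAreaSize = kPageSize - 256;  // assumed per-page overhead

int main() {
  intptr_t max_capacity = static_cast<intptr_t>(3) * 1024 * 1024 * 1024;  // 3G
  intptr_t rounded = max_capacity - (max_capacity % kPageSize);  // RoundDown
  intptr_t usable = (rounded / kPageSize) * kObjectAreaSize;
  // On a 64-bit build this product already exceeds INT_MAX, so an int
  // max_capacity_ would silently overflow here.
  std::printf("usable object area: %jd bytes\n", static_cast<intmax_t>(usable));
  return 0;
}
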
786 | 789 |
787 | 790 |
788 bool PagedSpace::Setup(Address start, size_t size) { | 791 bool PagedSpace::Setup(Address start, size_t size) { |
789 if (HasBeenSetup()) return false; | 792 if (HasBeenSetup()) return false; |
790 | 793 |
791 int num_pages = 0; | 794 int num_pages = 0; |
792 // Try to use the virtual memory range passed to us. If it is too small to | 795 // Try to use the virtual memory range passed to us. If it is too small to |
793 // contain at least one page, ignore it and allocate instead. | 796 // contain at least one page, ignore it and allocate instead. |
794 int pages_in_chunk = PagesInChunk(start, size); | 797 int pages_in_chunk = PagesInChunk(start, size); |
795 if (pages_in_chunk > 0) { | 798 if (pages_in_chunk > 0) { |
796 first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize), | 799 first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize), |
797 Page::kPageSize * pages_in_chunk, | 800 Page::kPageSize * pages_in_chunk, |
798 this, &num_pages); | 801 this, &num_pages); |
799 } else { | 802 } else { |
800 int requested_pages = Min(MemoryAllocator::kPagesPerChunk, | 803 int requested_pages = |
801 max_capacity_ / Page::kObjectAreaSize); | 804 Min(MemoryAllocator::kPagesPerChunk, |
| 805 static_cast<int>(max_capacity_ / Page::kObjectAreaSize)); |
802 first_page_ = | 806 first_page_ = |
803 MemoryAllocator::AllocatePages(requested_pages, &num_pages, this); | 807 MemoryAllocator::AllocatePages(requested_pages, &num_pages, this); |
804 if (!first_page_->is_valid()) return false; | 808 if (!first_page_->is_valid()) return false; |
805 } | 809 } |
806 | 810 |
807 // We are sure that the first page is valid and that we have at least one | 811 // We are sure that the first page is valid and that we have at least one |
808 // page. | 812 // page. |
809 ASSERT(first_page_->is_valid()); | 813 ASSERT(first_page_->is_valid()); |
810 ASSERT(num_pages > 0); | 814 ASSERT(num_pages > 0); |
811 accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize); | 815 accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize); |
(...skipping 165 matching lines...)
977 bool PagedSpace::Expand(Page* last_page) { | 981 bool PagedSpace::Expand(Page* last_page) { |
978 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); | 982 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); |
979 ASSERT(Capacity() % Page::kObjectAreaSize == 0); | 983 ASSERT(Capacity() % Page::kObjectAreaSize == 0); |
980 | 984 |
981 if (Capacity() == max_capacity_) return false; | 985 if (Capacity() == max_capacity_) return false; |
982 | 986 |
983 ASSERT(Capacity() < max_capacity_); | 987 ASSERT(Capacity() < max_capacity_); |
984 // Last page must be valid and its next page is invalid. | 988 // Last page must be valid and its next page is invalid. |
985 ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid()); | 989 ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid()); |
986 | 990 |
987 int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize; | 991 int available_pages = |
| 992 static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize); |
988 if (available_pages <= 0) return false; | 993 if (available_pages <= 0) return false; |
989 | 994 |
990 int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk); | 995 int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk); |
991 Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this); | 996 Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this); |
992 if (!p->is_valid()) return false; | 997 if (!p->is_valid()) return false; |
993 | 998 |
994 accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize); | 999 accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize); |
995 ASSERT(Capacity() <= max_capacity_); | 1000 ASSERT(Capacity() <= max_capacity_); |
996 | 1001 |
997 MemoryAllocator::SetNextPage(last_page, p); | 1002 MemoryAllocator::SetNextPage(last_page, p); |
(...skipping 259 matching lines...)
1257 V8::FatalProcessOutOfMemory("Failed to grow new space."); | 1262 V8::FatalProcessOutOfMemory("Failed to grow new space."); |
1258 } | 1263 } |
1259 } | 1264 } |
1260 } | 1265 } |
1261 allocation_info_.limit = to_space_.high(); | 1266 allocation_info_.limit = to_space_.high(); |
1262 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | 1267 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
1263 } | 1268 } |
1264 | 1269 |
1265 | 1270 |
1266 void NewSpace::Shrink() { | 1271 void NewSpace::Shrink() { |
1267 int new_capacity = Max(InitialCapacity(), 2 * Size()); | 1272 int new_capacity = Max(InitialCapacity(), 2 * IntSize()); |
1268 int rounded_new_capacity = | 1273 int rounded_new_capacity = |
1269 RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment())); | 1274 RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment())); |
1270 if (rounded_new_capacity < Capacity() && | 1275 if (rounded_new_capacity < Capacity() && |
1271 to_space_.ShrinkTo(rounded_new_capacity)) { | 1276 to_space_.ShrinkTo(rounded_new_capacity)) { |
1272 // Only shrink from space if we managed to shrink to space. | 1277 // Only shrink from space if we managed to shrink to space. |
1273 if (!from_space_.ShrinkTo(rounded_new_capacity)) { | 1278 if (!from_space_.ShrinkTo(rounded_new_capacity)) { |
1274 // If we managed to shrink to space but couldn't shrink from | 1279 // If we managed to shrink to space but couldn't shrink from |
1275 // space, attempt to grow to space again. | 1280 // space, attempt to grow to space again. |
1276 if (!to_space_.GrowTo(from_space_.Capacity())) { | 1281 if (!to_space_.GrowTo(from_space_.Capacity())) { |
1277 // We are in an inconsistent state because we could not | 1282 // We are in an inconsistent state because we could not |
(...skipping 358 matching lines...)
1636 } | 1641 } |
1637 LOG(HeapSampleEndEvent("NewSpace", description)); | 1642 LOG(HeapSampleEndEvent("NewSpace", description)); |
1638 } | 1643 } |
1639 #endif // ENABLE_LOGGING_AND_PROFILING | 1644 #endif // ENABLE_LOGGING_AND_PROFILING |
1640 | 1645 |
1641 | 1646 |
1642 void NewSpace::ReportStatistics() { | 1647 void NewSpace::ReportStatistics() { |
1643 #ifdef DEBUG | 1648 #ifdef DEBUG |
1644 if (FLAG_heap_stats) { | 1649 if (FLAG_heap_stats) { |
1645 float pct = static_cast<float>(Available()) / Capacity(); | 1650 float pct = static_cast<float>(Available()) / Capacity(); |
1646 PrintF(" capacity: %d, available: %d, %%%d\n", | 1651 PrintF(" capacity: %" V8_PTR_PREFIX "d" |
| 1652 ", available: %" V8_PTR_PREFIX "d, %%%d\n", |
1647 Capacity(), Available(), static_cast<int>(pct*100)); | 1653 Capacity(), Available(), static_cast<int>(pct*100)); |
1648 PrintF("\n Object Histogram:\n"); | 1654 PrintF("\n Object Histogram:\n"); |
1649 for (int i = 0; i <= LAST_TYPE; i++) { | 1655 for (int i = 0; i <= LAST_TYPE; i++) { |
1650 if (allocated_histogram_[i].number() > 0) { | 1656 if (allocated_histogram_[i].number() > 0) { |
1651 PrintF(" %-34s%10d (%10d bytes)\n", | 1657 PrintF(" %-34s%10d (%10d bytes)\n", |
1652 allocated_histogram_[i].name(), | 1658 allocated_histogram_[i].name(), |
1653 allocated_histogram_[i].number(), | 1659 allocated_histogram_[i].number(), |
1654 allocated_histogram_[i].bytes()); | 1660 allocated_histogram_[i].bytes()); |
1655 } | 1661 } |
1656 } | 1662 } |
(...skipping 737 matching lines...)
2394 ASSERT(code->instruction_start() <= prev_pc && | 2400 ASSERT(code->instruction_start() <= prev_pc && |
2395 prev_pc <= code->instruction_end()); | 2401 prev_pc <= code->instruction_end()); |
2396 delta += static_cast<int>(code->instruction_end() - prev_pc); | 2402 delta += static_cast<int>(code->instruction_end() - prev_pc); |
2397 EnterComment("NoComment", delta); | 2403 EnterComment("NoComment", delta); |
2398 } | 2404 } |
2399 } | 2405 } |
2400 } | 2406 } |
2401 | 2407 |
2402 | 2408 |
2403 void OldSpace::ReportStatistics() { | 2409 void OldSpace::ReportStatistics() { |
2404 int pct = Available() * 100 / Capacity(); | 2410 int pct = static_cast<int>(Available() * 100 / Capacity()); |
2405 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n", | 2411 PrintF(" capacity: %" V8_PTR_PREFIX "d" |
| 2412 ", waste: %" V8_PTR_PREFIX "d" |
| 2413 ", available: %" V8_PTR_PREFIX "d, %%%d\n", |
2406 Capacity(), Waste(), Available(), pct); | 2414 Capacity(), Waste(), Available(), pct); |
2407 | 2415 |
2408 ClearHistograms(); | 2416 ClearHistograms(); |
2409 HeapObjectIterator obj_it(this); | 2417 HeapObjectIterator obj_it(this); |
2410 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) | 2418 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) |
2411 CollectHistogramInfo(obj); | 2419 CollectHistogramInfo(obj); |
2412 ReportHistogram(true); | 2420 ReportHistogram(true); |
2413 } | 2421 } |
2414 #endif | 2422 #endif |
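
With Available() and Capacity() widened to intptr_t, Available() * 100 / Capacity() is itself an intptr_t, so this hunk (and the FixedSpace and Expand hunks below) adds an explicit static_cast<int> before the value reaches a %d conversion. A tiny illustration of that narrowing with made-up numbers:

// Why the percentage now needs an explicit narrowing cast: the arithmetic
// is done in intptr_t once the accounting values widen. Values are made up.
#include <cstdint>
#include <cstdio>

int main() {
  intptr_t capacity = static_cast<intptr_t>(512) * 1024 * 1024;
  intptr_t available = static_cast<intptr_t>(128) * 1024 * 1024;
  intptr_t wide_pct = available * 100 / capacity;  // intptr_t result
  int pct = static_cast<int>(wide_pct);            // safe: always 0..100
  std::printf("available: %d%%\n", pct);
  return 0;
}
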
2415 | 2423 |
(...skipping 135 matching lines...)
2551 ASSERT(size_in_bytes % size == 0); | 2559 ASSERT(size_in_bytes % size == 0); |
2552 Address end = start + size_in_bytes; | 2560 Address end = start + size_in_bytes; |
2553 for (Address a = start; a < end; a += size) { | 2561 for (Address a = start; a < end; a += size) { |
2554 Free(a, add_to_freelist); | 2562 Free(a, add_to_freelist); |
2555 } | 2563 } |
2556 } | 2564 } |
2557 | 2565 |
2558 | 2566 |
2559 #ifdef DEBUG | 2567 #ifdef DEBUG |
2560 void FixedSpace::ReportStatistics() { | 2568 void FixedSpace::ReportStatistics() { |
2561 int pct = Available() * 100 / Capacity(); | 2569 int pct = static_cast<int>(Available() * 100 / Capacity()); |
2562 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n", | 2570 PrintF(" capacity: %" V8_PTR_PREFIX "d" |
| 2571 ", waste: %" V8_PTR_PREFIX "d" |
| 2572 ", available: %" V8_PTR_PREFIX "d, %%%d\n", |
2563 Capacity(), Waste(), Available(), pct); | 2573 Capacity(), Waste(), Available(), pct); |
2564 | 2574 |
2565 ClearHistograms(); | 2575 ClearHistograms(); |
2566 HeapObjectIterator obj_it(this); | 2576 HeapObjectIterator obj_it(this); |
2567 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) | 2577 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) |
2568 CollectHistogramInfo(obj); | 2578 CollectHistogramInfo(obj); |
2569 ReportHistogram(false); | 2579 ReportHistogram(false); |
2570 } | 2580 } |
2571 #endif | 2581 #endif |
2572 | 2582 |
(...skipping 431 matching lines...)
3004 | 3014 |
3005 void LargeObjectSpace::Print() { | 3015 void LargeObjectSpace::Print() { |
3006 LargeObjectIterator it(this); | 3016 LargeObjectIterator it(this); |
3007 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) { | 3017 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) { |
3008 obj->Print(); | 3018 obj->Print(); |
3009 } | 3019 } |
3010 } | 3020 } |
3011 | 3021 |
3012 | 3022 |
3013 void LargeObjectSpace::ReportStatistics() { | 3023 void LargeObjectSpace::ReportStatistics() { |
3014 PrintF(" size: %d\n", size_); | 3024 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_); |
3015 int num_objects = 0; | 3025 int num_objects = 0; |
3016 ClearHistograms(); | 3026 ClearHistograms(); |
3017 LargeObjectIterator it(this); | 3027 LargeObjectIterator it(this); |
3018 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) { | 3028 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) { |
3019 num_objects++; | 3029 num_objects++; |
3020 CollectHistogramInfo(obj); | 3030 CollectHistogramInfo(obj); |
3021 } | 3031 } |
3022 | 3032 |
3023 PrintF(" number of objects %d\n", num_objects); | 3033 PrintF(" number of objects %d\n", num_objects); |
3024 if (num_objects > 0) ReportHistogram(false); | 3034 if (num_objects > 0) ReportHistogram(false); |
3025 } | 3035 } |
3026 | 3036 |
3027 | 3037 |
3028 void LargeObjectSpace::CollectCodeStatistics() { | 3038 void LargeObjectSpace::CollectCodeStatistics() { |
3029 LargeObjectIterator obj_it(this); | 3039 LargeObjectIterator obj_it(this); |
3030 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { | 3040 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { |
3031 if (obj->IsCode()) { | 3041 if (obj->IsCode()) { |
3032 Code* code = Code::cast(obj); | 3042 Code* code = Code::cast(obj); |
3033 code_kind_statistics[code->kind()] += code->Size(); | 3043 code_kind_statistics[code->kind()] += code->Size(); |
3034 } | 3044 } |
3035 } | 3045 } |
3036 } | 3046 } |
3037 #endif // DEBUG | 3047 #endif // DEBUG |
3038 | 3048 |
3039 } } // namespace v8::internal | 3049 } } // namespace v8::internal |