Chromium Code Reviews

Side by Side Diff: src/spaces.cc

Issue 6696042: Adding 'isolate' argument to LOG to get rid of multiple TLS fetches in profiling. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/isolates
Patch Set: Created 9 years, 9 months ago
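
For readers of the diff below: the patch changes call sites of V8's LOG macro from LOG(Call) to LOG(isolate, Call). A self-contained miniature of the before/after shape follows; the type and macro names are stand-ins (the real macro lives in src/log.h), and the _BEFORE/_AFTER suffixes exist only so both variants can coexist in one sketch.

// Stand-in types; imagine Isolate::Current() doing a thread-local fetch.
struct Logger {
  bool is_logging() const { return true; }
  void NewEvent(const char* name, void* address, unsigned long size) {}
};

struct Isolate {
  static Isolate* Current() {  // stand-in for the TLS lookup
    static Isolate isolate;
    return &isolate;
  }
  Logger* logger() { return &logger_; }
  Logger logger_;
};

// Before: every expansion re-fetched the current isolate from TLS, so a
// hot profiling path with several LOGs paid several TLS lookups.
#define LOG_BEFORE(Call)                           \
  do {                                             \
    Logger* logger = Isolate::Current()->logger(); \
    if (logger->is_logging()) logger->Call;        \
  } while (false)

// After: the caller passes an isolate it already holds (isolate_,
// heap()->isolate(), ...), and the expansion does no TLS lookup.
#define LOG_AFTER(isolate, Call)                   \
  do {                                             \
    Logger* logger = (isolate)->logger();          \
    if (logger->is_logging()) logger->Call;        \
  } while (false)

// Usage, mirroring the call sites in this file:
//   LOG_AFTER(isolate_, NewEvent("CodeRange", address, size));
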
1 // Copyright 2006-2010 the V8 project authors. All rights reserved. 1 // Copyright 2006-2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 152 matching lines...)
163 code_range_ = new VirtualMemory(requested); 163 code_range_ = new VirtualMemory(requested);
164 CHECK(code_range_ != NULL); 164 CHECK(code_range_ != NULL);
165 if (!code_range_->IsReserved()) { 165 if (!code_range_->IsReserved()) {
166 delete code_range_; 166 delete code_range_;
167 code_range_ = NULL; 167 code_range_ = NULL;
168 return false; 168 return false;
169 } 169 }
170 170
171 // We are sure that we have mapped a block of requested addresses. 171 // We are sure that we have mapped a block of requested addresses.
172 ASSERT(code_range_->size() == requested); 172 ASSERT(code_range_->size() == requested);
173 LOG(NewEvent("CodeRange", code_range_->address(), requested)); 173 LOG(isolate_,NewEvent("CodeRange", code_range_->address(), requested));
174 allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size())); 174 allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
175 current_allocation_block_index_ = 0; 175 current_allocation_block_index_ = 0;
176 return true; 176 return true;
177 } 177 }
178 178
179 179
180 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, 180 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
181 const FreeBlock* right) { 181 const FreeBlock* right) {
182 // The entire point of CodeRange is that the difference between two 182 // The entire point of CodeRange is that the difference between two
183 // addresses in the range can be represented as a signed 32-bit int, 183 // addresses in the range can be represented as a signed 32-bit int,
(...skipping 153 matching lines...)
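
The property the CodeRange comment above relies on, as a freestanding check (a sketch, not V8 code): on x64, near calls and jumps encode a signed 32-bit displacement, so confining generated code to one contiguous range under 2 GB in extent guarantees every intra-range offset fits in an int32.

#include <cstddef>
#include <cstdint>

// Returns true when a pc-relative displacement between two code
// addresses is encodable as a signed 32-bit immediate.
bool OffsetFitsInInt32(const char* from, const char* to) {
  std::ptrdiff_t delta = to - from;
  return delta >= INT32_MIN && delta <= INT32_MAX;
}
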
337 337
338 338
339 void MemoryAllocator::TearDown() { 339 void MemoryAllocator::TearDown() {
340 for (int i = 0; i < max_nof_chunks_; i++) { 340 for (int i = 0; i < max_nof_chunks_; i++) {
341 if (chunks_[i].address() != NULL) DeleteChunk(i); 341 if (chunks_[i].address() != NULL) DeleteChunk(i);
342 } 342 }
343 chunks_.Clear(); 343 chunks_.Clear();
344 free_chunk_ids_.Clear(); 344 free_chunk_ids_.Clear();
345 345
346 if (initial_chunk_ != NULL) { 346 if (initial_chunk_ != NULL) {
347 LOG(DeleteEvent("InitialChunk", initial_chunk_->address())); 347 LOG(isolate_,DeleteEvent("InitialChunk", initial_chunk_->address()));
348 delete initial_chunk_; 348 delete initial_chunk_;
349 initial_chunk_ = NULL; 349 initial_chunk_ = NULL;
350 } 350 }
351 351
352 ASSERT(top_ == max_nof_chunks_); // all chunks are free 352 ASSERT(top_ == max_nof_chunks_); // all chunks are free
353 top_ = 0; 353 top_ = 0;
354 capacity_ = 0; 354 capacity_ = 0;
355 capacity_executable_ = 0; 355 capacity_executable_ = 0;
356 size_ = 0; 356 size_ = 0;
357 max_nof_chunks_ = 0; 357 max_nof_chunks_ = 0;
358 } 358 }
359 359
360 360
361 void* MemoryAllocator::AllocateRawMemory(const size_t requested, 361 void* MemoryAllocator::AllocateRawMemory(const size_t requested,
362 size_t* allocated, 362 size_t* allocated,
363 Executability executable) { 363 Executability executable) {
364 if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) { 364 if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
365 return NULL; 365 return NULL;
366 } 366 }
367 367
368 void* mem; 368 void* mem;
369 if (executable == EXECUTABLE) { 369 if (executable == EXECUTABLE) {
370 // Check executable memory limit. 370 // Check executable memory limit.
371 if (size_executable_ + requested > 371 if (size_executable_ + requested >
372 static_cast<size_t>(capacity_executable_)) { 372 static_cast<size_t>(capacity_executable_)) {
373 LOG(StringEvent("MemoryAllocator::AllocateRawMemory", 373 LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
374 "V8 Executable Allocation capacity exceeded")); 374 "V8 Executable Allocation capacity exceeded"));
375 return NULL; 375 return NULL;
376 } 376 }
377 // Allocate executable memory either from code range or from the 377 // Allocate executable memory either from code range or from the
378 // OS. 378 // OS.
379 if (isolate_->code_range()->exists()) { 379 if (isolate_->code_range()->exists()) {
380 mem = isolate_->code_range()->AllocateRawMemory(requested, allocated); 380 mem = isolate_->code_range()->AllocateRawMemory(requested, allocated);
381 } else { 381 } else {
382 mem = OS::Allocate(requested, allocated, true); 382 mem = OS::Allocate(requested, allocated, true);
383 } 383 }
(...skipping 84 matching lines...)
468 initial_chunk_ = new VirtualMemory(requested); 468 initial_chunk_ = new VirtualMemory(requested);
469 CHECK(initial_chunk_ != NULL); 469 CHECK(initial_chunk_ != NULL);
470 if (!initial_chunk_->IsReserved()) { 470 if (!initial_chunk_->IsReserved()) {
471 delete initial_chunk_; 471 delete initial_chunk_;
472 initial_chunk_ = NULL; 472 initial_chunk_ = NULL;
473 return NULL; 473 return NULL;
474 } 474 }
475 475
476 // We are sure that we have mapped a block of requested addresses. 476 // We are sure that we have mapped a block of requested addresses.
477 ASSERT(initial_chunk_->size() == requested); 477 ASSERT(initial_chunk_->size() == requested);
478 LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested)); 478 LOG(isolate_, NewEvent("InitialChunk", initial_chunk_->address(), requested));
479 size_ += static_cast<int>(requested); 479 size_ += static_cast<int>(requested);
480 return initial_chunk_->address(); 480 return initial_chunk_->address();
481 } 481 }
482 482
483 483
484 static int PagesInChunk(Address start, size_t size) { 484 static int PagesInChunk(Address start, size_t size) {
485 // The first page starts on the first page-aligned address from start onward 485 // The first page starts on the first page-aligned address from start onward
486 // and the last page ends on the last page-aligned address before 486 // and the last page ends on the last page-aligned address before
487 // start+size. Page::kPageSize is a power of two so we can divide by 487 // start+size. Page::kPageSize is a power of two so we can divide by
488 // shifting. 488 // shifting.
489 return static_cast<int>((RoundDown(start + size, Page::kPageSize) 489 return static_cast<int>((RoundDown(start + size, Page::kPageSize)
490 - RoundUp(start, Page::kPageSize)) >> kPageSizeBits); 490 - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
491 } 491 }
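
A worked example of the computation above, assuming 8 KB pages (kPageSizeBits == 13); the page size is illustrative, but the rounding logic mirrors the function.

#include <cassert>
#include <cstddef>
#include <cstdint>

const int kPageSizeBits = 13;  // illustrative: 8 KB pages
const uintptr_t kPageSize = uintptr_t(1) << kPageSizeBits;

int PagesInChunkSketch(uintptr_t start, size_t size) {
  uintptr_t first = (start + kPageSize - 1) & ~(kPageSize - 1);  // RoundUp
  uintptr_t last  = (start + size) & ~(kPageSize - 1);           // RoundDown
  // kPageSize is a power of two, so dividing is a right shift.
  return static_cast<int>((last - first) >> kPageSizeBits);
}

int main() {
  // A 40 KB chunk starting 1 KB past a page boundary: the first full page
  // begins at 8 KB and the last ends at 40 KB, so only 4 complete pages
  // fit even though 40 KB / 8 KB = 5 -- the "lost" page that the later
  // ASSERT(*allocated_pages >= kPagesPerChunk - 1) tolerates.
  assert(PagesInChunkSketch(1024, 40 * 1024) == 4);
  return 0;
}
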
492 492
493 493
494 Page* MemoryAllocator::AllocatePages(int requested_pages, 494 Page* MemoryAllocator::AllocatePages(int requested_pages,
495 int* allocated_pages, 495 int* allocated_pages,
496 PagedSpace* owner) { 496 PagedSpace* owner) {
497 if (requested_pages <= 0) return Page::FromAddress(NULL); 497 if (requested_pages <= 0) return Page::FromAddress(NULL);
498 size_t chunk_size = requested_pages * Page::kPageSize; 498 size_t chunk_size = requested_pages * Page::kPageSize;
499 499
500 void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable()); 500 void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
501 if (chunk == NULL) return Page::FromAddress(NULL); 501 if (chunk == NULL) return Page::FromAddress(NULL);
502 LOG(NewEvent("PagedChunk", chunk, chunk_size)); 502 LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
503 503
504 *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size); 504 *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
505 // We may 'lose' a page due to alignment. 505 // We may 'lose' a page due to alignment.
506 ASSERT(*allocated_pages >= kPagesPerChunk - 1); 506 ASSERT(*allocated_pages >= kPagesPerChunk - 1);
507 if (*allocated_pages == 0) { 507 if (*allocated_pages == 0) {
508 FreeRawMemory(chunk, chunk_size, owner->executable()); 508 FreeRawMemory(chunk, chunk_size, owner->executable());
509 LOG(DeleteEvent("PagedChunk", chunk)); 509 LOG(isolate_, DeleteEvent("PagedChunk", chunk));
510 return Page::FromAddress(NULL); 510 return Page::FromAddress(NULL);
511 } 511 }
512 512
513 int chunk_id = Pop(); 513 int chunk_id = Pop();
514 chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner); 514 chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
515 515
516 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); 516 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
517 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); 517 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
518 Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner); 518 Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
519 519
(...skipping 148 matching lines...)
668 668
669 // We cannot free a chunk contained in the initial chunk because it was not 669 // We cannot free a chunk contained in the initial chunk because it was not
670 // allocated with AllocateRawMemory. Instead we uncommit the virtual 670 // allocated with AllocateRawMemory. Instead we uncommit the virtual
671 // memory. 671 // memory.
672 if (InInitialChunk(c.address())) { 672 if (InInitialChunk(c.address())) {
673 // TODO(1240712): VirtualMemory::Uncommit has a return value which 673 // TODO(1240712): VirtualMemory::Uncommit has a return value which
674 // is ignored here. 674 // is ignored here.
675 initial_chunk_->Uncommit(c.address(), c.size()); 675 initial_chunk_->Uncommit(c.address(), c.size());
676 COUNTERS->memory_allocated()->Decrement(static_cast<int>(c.size())); 676 COUNTERS->memory_allocated()->Decrement(static_cast<int>(c.size()));
677 } else { 677 } else {
678 LOG(DeleteEvent("PagedChunk", c.address())); 678 LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
679 ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity()); 679 ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
680 size_t size = c.size(); 680 size_t size = c.size();
681 FreeRawMemory(c.address(), size, c.executable()); 681 FreeRawMemory(c.address(), size, c.executable());
682 PerformAllocationCallback(space, kAllocationActionFree, size); 682 PerformAllocationCallback(space, kAllocationActionFree, size);
683 } 683 }
684 c.init(NULL, 0, NULL); 684 c.init(NULL, 0, NULL);
685 Push(chunk_id); 685 Push(chunk_id);
686 } 686 }
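
The reserve/commit distinction behind the branch above, sketched with POSIX primitives (V8's VirtualMemory wraps platform calls of roughly this shape; the function names here are assumptions, not V8's API):

#include <sys/mman.h>
#include <cstddef>

// Reserve: claim address space with no access and no backing storage.
void* Reserve(size_t size) {
  return mmap(NULL, size, PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
}

// Commit: make a sub-range usable; pages get backing on first touch.
bool Commit(void* address, size_t size) {
  return mprotect(address, size, PROT_READ | PROT_WRITE) == 0;
}

// Uncommit: drop the backing but keep the reservation by re-mapping the
// range as inaccessible. The range still belongs to the big reservation,
// which is why DeleteChunk must Uncommit rather than free it: the memory
// was never individually allocated, so it cannot be individually freed.
bool Uncommit(void* address, size_t size) {
  return mmap(address, size, PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
              -1, 0) != MAP_FAILED;
}
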
687 687
688 688
(...skipping 963 matching lines...)
1652 // flag is set. 1652 // flag is set.
1653 void NewSpace::CollectStatistics() { 1653 void NewSpace::CollectStatistics() {
1654 ClearHistograms(); 1654 ClearHistograms();
1655 SemiSpaceIterator it(this); 1655 SemiSpaceIterator it(this);
1656 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) 1656 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1657 RecordAllocation(obj); 1657 RecordAllocation(obj);
1658 } 1658 }
1659 1659
1660 1660
1661 #ifdef ENABLE_LOGGING_AND_PROFILING 1661 #ifdef ENABLE_LOGGING_AND_PROFILING
1662 static void DoReportStatistics(HistogramInfo* info, const char* description) { 1662 static void DoReportStatistics(Isolate* isolate,
1663 LOG(HeapSampleBeginEvent("NewSpace", description)); 1663 HistogramInfo* info, const char* description) {
1664 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1664 // Lump all the string types together. 1665 // Lump all the string types together.
1665 int string_number = 0; 1666 int string_number = 0;
1666 int string_bytes = 0; 1667 int string_bytes = 0;
1667 #define INCREMENT(type, size, name, camel_name) \ 1668 #define INCREMENT(type, size, name, camel_name) \
1668 string_number += info[type].number(); \ 1669 string_number += info[type].number(); \
1669 string_bytes += info[type].bytes(); 1670 string_bytes += info[type].bytes();
1670 STRING_TYPE_LIST(INCREMENT) 1671 STRING_TYPE_LIST(INCREMENT)
1671 #undef INCREMENT 1672 #undef INCREMENT
1672 if (string_number > 0) { 1673 if (string_number > 0) {
1673 LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes)); 1674 LOG(isolate, HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1674 } 1675 }
1675 1676
1676 // Then do the other types. 1677 // Then do the other types.
1677 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) { 1678 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1678 if (info[i].number() > 0) { 1679 if (info[i].number() > 0) {
1679 LOG(HeapSampleItemEvent(info[i].name(), info[i].number(), 1680 LOG(isolate,
1681 HeapSampleItemEvent(info[i].name(), info[i].number(),
1680 info[i].bytes())); 1682 info[i].bytes()));
1681 } 1683 }
1682 } 1684 }
1683 LOG(HeapSampleEndEvent("NewSpace", description)); 1685 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
1684 } 1686 }
1685 #endif // ENABLE_LOGGING_AND_PROFILING 1687 #endif // ENABLE_LOGGING_AND_PROFILING
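
The INCREMENT/STRING_TYPE_LIST block in DoReportStatistics above is V8's X-macro pattern: a list macro invokes a caller-supplied macro once per entry, so one list can stamp out counters, enum values, or switch arms. A self-contained miniature (the list entries are invented):

#include <cstdio>

// The list macro: calls V once per entry.
#define DEMO_TYPE_LIST(V) \
  V(ASCII_STRING, 1)      \
  V(TWO_BYTE_STRING, 2)   \
  V(CONS_STRING, 3)

int main() {
  int total = 0;
  // Define the per-entry expansion, apply the list, then clean up --
  // the same shape as INCREMENT / STRING_TYPE_LIST / #undef INCREMENT.
#define COUNT(name, size) total += (size);
  DEMO_TYPE_LIST(COUNT)
#undef COUNT
  printf("total = %d\n", total);  // prints: total = 6
  return 0;
}
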
1686 1688
1687 1689
1688 void NewSpace::ReportStatistics() { 1690 void NewSpace::ReportStatistics() {
1689 #ifdef DEBUG 1691 #ifdef DEBUG
1690 if (FLAG_heap_stats) { 1692 if (FLAG_heap_stats) {
1691 float pct = static_cast<float>(Available()) / Capacity(); 1693 float pct = static_cast<float>(Available()) / Capacity();
1692 PrintF(" capacity: %" V8_PTR_PREFIX "d" 1694 PrintF(" capacity: %" V8_PTR_PREFIX "d"
1693 ", available: %" V8_PTR_PREFIX "d, %%%d\n", 1695 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
1694 Capacity(), Available(), static_cast<int>(pct*100)); 1696 Capacity(), Available(), static_cast<int>(pct*100));
1695 PrintF("\n Object Histogram:\n"); 1697 PrintF("\n Object Histogram:\n");
1696 for (int i = 0; i <= LAST_TYPE; i++) { 1698 for (int i = 0; i <= LAST_TYPE; i++) {
1697 if (allocated_histogram_[i].number() > 0) { 1699 if (allocated_histogram_[i].number() > 0) {
1698 PrintF(" %-34s%10d (%10d bytes)\n", 1700 PrintF(" %-34s%10d (%10d bytes)\n",
1699 allocated_histogram_[i].name(), 1701 allocated_histogram_[i].name(),
1700 allocated_histogram_[i].number(), 1702 allocated_histogram_[i].number(),
1701 allocated_histogram_[i].bytes()); 1703 allocated_histogram_[i].bytes());
1702 } 1704 }
1703 } 1705 }
1704 PrintF("\n"); 1706 PrintF("\n");
1705 } 1707 }
1706 #endif // DEBUG 1708 #endif // DEBUG
1707 1709
1708 #ifdef ENABLE_LOGGING_AND_PROFILING 1710 #ifdef ENABLE_LOGGING_AND_PROFILING
1709 if (FLAG_log_gc) { 1711 if (FLAG_log_gc) {
1710 DoReportStatistics(allocated_histogram_, "allocated"); 1712 Isolate* isolate = ISOLATE;
1711 DoReportStatistics(promoted_histogram_, "promoted"); 1713 DoReportStatistics(isolate, allocated_histogram_, "allocated");
1714 DoReportStatistics(isolate, promoted_histogram_, "promoted");
1712 } 1715 }
1713 #endif // ENABLE_LOGGING_AND_PROFILING 1716 #endif // ENABLE_LOGGING_AND_PROFILING
1714 } 1717 }
1715 1718
1716 1719
1717 void NewSpace::RecordAllocation(HeapObject* obj) { 1720 void NewSpace::RecordAllocation(HeapObject* obj) {
1718 InstanceType type = obj->map()->instance_type(); 1721 InstanceType type = obj->map()->instance_type();
1719 ASSERT(0 <= type && type <= LAST_TYPE); 1722 ASSERT(0 <= type && type <= LAST_TYPE);
1720 allocated_histogram_[type].increment_number(1); 1723 allocated_histogram_[type].increment_number(1);
1721 allocated_histogram_[type].increment_bytes(obj->Size()); 1724 allocated_histogram_[type].increment_bytes(obj->Size());
(...skipping 987 matching lines...)
2709 } 2712 }
2710 2713
2711 2714
2712 // ----------------------------------------------------------------------------- 2715 // -----------------------------------------------------------------------------
2713 // LargeObjectChunk 2716 // LargeObjectChunk
2714 2717
2715 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes, 2718 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
2716 Executability executable) { 2719 Executability executable) {
2717 size_t requested = ChunkSizeFor(size_in_bytes); 2720 size_t requested = ChunkSizeFor(size_in_bytes);
2718 size_t size; 2721 size_t size;
2719 void* mem = Isolate::Current()->memory_allocator()->AllocateRawMemory( 2722 Isolate* isolate = Isolate::Current();
2723 void* mem = isolate->memory_allocator()->AllocateRawMemory(
2720 requested, &size, executable); 2724 requested, &size, executable);
2721 if (mem == NULL) return NULL; 2725 if (mem == NULL) return NULL;
2722 2726
2723 // The start of the chunk may be overlayed with a page so we have to 2727 // The start of the chunk may be overlayed with a page so we have to
2724 // make sure that the page flags fit in the size field. 2728 // make sure that the page flags fit in the size field.
2725 ASSERT((size & Page::kPageFlagMask) == 0); 2729 ASSERT((size & Page::kPageFlagMask) == 0);
2726 2730
2727 LOG(NewEvent("LargeObjectChunk", mem, size)); 2731 LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
2728 if (size < requested) { 2732 if (size < requested) {
2729 Isolate::Current()->memory_allocator()->FreeRawMemory( 2733 isolate->memory_allocator()->FreeRawMemory(
2730 mem, size, executable); 2734 mem, size, executable);
2731 LOG(DeleteEvent("LargeObjectChunk", mem)); 2735 LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
2732 return NULL; 2736 return NULL;
2733 } 2737 }
2734 2738
2735 ObjectSpace space = (executable == EXECUTABLE) 2739 ObjectSpace space = (executable == EXECUTABLE)
2736 ? kObjectSpaceCodeSpace 2740 ? kObjectSpaceCodeSpace
2737 : kObjectSpaceLoSpace; 2741 : kObjectSpaceLoSpace;
2738 Isolate::Current()->memory_allocator()->PerformAllocationCallback( 2742 isolate->memory_allocator()->PerformAllocationCallback(
2739 space, kAllocationActionAllocate, size); 2743 space, kAllocationActionAllocate, size);
2740 2744
2741 LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem); 2745 LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
2742 chunk->size_ = size; 2746 chunk->size_ = size;
2743 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize)); 2747 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2744 page->heap_ = Isolate::Current()->heap(); 2748 page->heap_ = isolate->heap();
2745 return chunk; 2749 return chunk;
2746 } 2750 }
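
The ASSERT((size & Page::kPageFlagMask) == 0) above guards a low-bit tagging trick: when a value is a multiple of some alignment, its low bits are zero and can carry flags without losing information. A generic miniature (mask width and names invented):

#include <cassert>
#include <cstdint>

const uintptr_t kFlagMask = 0xFF;  // illustrative: low 8 bits hold flags

uintptr_t TagSize(uintptr_t aligned_size, uintptr_t flags) {
  assert((aligned_size & kFlagMask) == 0);  // size must leave the bits free
  assert((flags & ~kFlagMask) == 0);        // flags must fit in the mask
  return aligned_size | flags;
}

uintptr_t SizeOf(uintptr_t tagged)  { return tagged & ~kFlagMask; }
uintptr_t FlagsOf(uintptr_t tagged) { return tagged & kFlagMask; }
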
2747 2751
2748 2752
(...skipping 22 matching lines...)
2771 page_count_ = 0; 2775 page_count_ = 0;
2772 objects_size_ = 0; 2776 objects_size_ = 0;
2773 return true; 2777 return true;
2774 } 2778 }
2775 2779
2776 2780
2777 void LargeObjectSpace::TearDown() { 2781 void LargeObjectSpace::TearDown() {
2778 while (first_chunk_ != NULL) { 2782 while (first_chunk_ != NULL) {
2779 LargeObjectChunk* chunk = first_chunk_; 2783 LargeObjectChunk* chunk = first_chunk_;
2780 first_chunk_ = first_chunk_->next(); 2784 first_chunk_ = first_chunk_->next();
2781 LOG(DeleteEvent("LargeObjectChunk", chunk->address())); 2785 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk->address()));
2782 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize)); 2786 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2783 Executability executable = 2787 Executability executable =
2784 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE; 2788 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
2785 ObjectSpace space = kObjectSpaceLoSpace; 2789 ObjectSpace space = kObjectSpaceLoSpace;
2786 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace; 2790 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
2787 size_t size = chunk->size(); 2791 size_t size = chunk->size();
2788 heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(), 2792 heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(),
2789 size, 2793 size,
2790 executable); 2794 executable);
2791 heap()->isolate()->memory_allocator()->PerformAllocationCallback( 2795 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
(...skipping 215 matching lines...)
3007 size_ -= static_cast<int>(chunk_size); 3011 size_ -= static_cast<int>(chunk_size);
3008 objects_size_ -= object->Size(); 3012 objects_size_ -= object->Size();
3009 page_count_--; 3013 page_count_--;
3010 ObjectSpace space = kObjectSpaceLoSpace; 3014 ObjectSpace space = kObjectSpaceLoSpace;
3011 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace; 3015 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
3012 heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address, 3016 heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address,
3013 chunk_size, 3017 chunk_size,
3014 executable); 3018 executable);
3015 heap()->isolate()->memory_allocator()->PerformAllocationCallback( 3019 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
3016 space, kAllocationActionFree, size_); 3020 space, kAllocationActionFree, size_);
3017 LOG(DeleteEvent("LargeObjectChunk", chunk_address)); 3021 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address));
3018 } 3022 }
3019 } 3023 }
3020 } 3024 }
3021 3025
3022 3026
3023 bool LargeObjectSpace::Contains(HeapObject* object) { 3027 bool LargeObjectSpace::Contains(HeapObject* object) {
3024 Address address = object->address(); 3028 Address address = object->address();
3025 if (heap()->new_space()->Contains(address)) { 3029 if (heap()->new_space()->Contains(address)) {
3026 return false; 3030 return false;
3027 } 3031 }
(...skipping 96 matching lines...)
3124 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { 3128 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
3125 if (obj->IsCode()) { 3129 if (obj->IsCode()) {
3126 Code* code = Code::cast(obj); 3130 Code* code = Code::cast(obj);
3127 isolate->code_kind_statistics()[code->kind()] += code->Size(); 3131 isolate->code_kind_statistics()[code->kind()] += code->Size();
3128 } 3132 }
3129 } 3133 }
3130 } 3134 }
3131 #endif // DEBUG 3135 #endif // DEBUG
3132 3136
3133 } } // namespace v8::internal 3137 } } // namespace v8::internal