Chromium Code Reviews

Unified Diff: src/spaces.cc

Issue 6696042: Adding 'isolates' argument to LOG to get rid of multiple TLS fetches in profiling. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/experimental/isolates
Patch Set: Addressing code review feedback + rebase (created 9 years, 9 months ago)
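
A note for readers of the diff below: the change threads an explicit isolate argument through V8's LOG macro so that call sites stop re-resolving the current isolate from thread-local storage on every logging statement. The following stand-alone sketch shows only the shape of that pattern; the Logger and Isolate types and the macro body here are illustrative stand-ins, not the actual declarations from src/log.h or src/isolate.h.

// Sketch only: stand-in types; the real LOG macro and Logger live in src/log.h.
#include <cstddef>
#include <cstdio>

struct Logger {
  bool is_logging() const { return true; }
  void NewEvent(const char* name, void* object, size_t size) {
    std::printf("new,%s,%p,%zu\n", name, object, size);
  }
};

struct Isolate {
  Logger* logger() { return &logger_; }
  Logger logger_;
};

// Two-argument form: the caller passes the Isolate it already holds, so the
// macro no longer needs to fetch the current isolate (a TLS read) itself.
#define LOG(isolate, Call)                      \
  do {                                          \
    Logger* logger = (isolate)->logger();       \
    if (logger->is_logging()) logger->Call;     \
  } while (false)

int main() {
  Isolate isolate;
  int block = 0;
  // Call sites change from LOG(NewEvent(...)) to the form used in this patch:
  LOG(&isolate, NewEvent("CodeRange", &block, 4096));
  return 0;
}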
@@ -1,10 +1,10 @@
 // Copyright 2006-2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 152 matching lines...)
@@ -163,21 +163,21 @@
   code_range_ = new VirtualMemory(requested);
   CHECK(code_range_ != NULL);
   if (!code_range_->IsReserved()) {
     delete code_range_;
     code_range_ = NULL;
     return false;
   }
 
   // We are sure that we have mapped a block of requested addresses.
   ASSERT(code_range_->size() == requested);
-  LOG(NewEvent("CodeRange", code_range_->address(), requested));
+  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
   allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
   current_allocation_block_index_ = 0;
   return true;
 }
 
 
 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                        const FreeBlock* right) {
   // The entire point of CodeRange is that the difference between two
   // addresses in the range can be represented as a signed 32-bit int,
(...skipping 153 matching lines...)
@@ -337,47 +337,48 @@
 
 
 void MemoryAllocator::TearDown() {
   for (int i = 0; i < max_nof_chunks_; i++) {
     if (chunks_[i].address() != NULL) DeleteChunk(i);
   }
   chunks_.Clear();
   free_chunk_ids_.Clear();
 
   if (initial_chunk_ != NULL) {
-    LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
+    LOG(isolate_, DeleteEvent("InitialChunk", initial_chunk_->address()));
     delete initial_chunk_;
     initial_chunk_ = NULL;
   }
 
   ASSERT(top_ == max_nof_chunks_);  // all chunks are free
   top_ = 0;
   capacity_ = 0;
   capacity_executable_ = 0;
   size_ = 0;
   max_nof_chunks_ = 0;
 }
 
 
 void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                          size_t* allocated,
                                          Executability executable) {
   if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
     return NULL;
   }
 
   void* mem;
   if (executable == EXECUTABLE) {
     // Check executable memory limit.
     if (size_executable_ + requested >
         static_cast<size_t>(capacity_executable_)) {
-      LOG(StringEvent("MemoryAllocator::AllocateRawMemory",
-                      "V8 Executable Allocation capacity exceeded"));
+      LOG(isolate_,
+          StringEvent("MemoryAllocator::AllocateRawMemory",
+                      "V8 Executable Allocation capacity exceeded"));
       return NULL;
     }
     // Allocate executable memory either from code range or from the
     // OS.
     if (isolate_->code_range()->exists()) {
       mem = isolate_->code_range()->AllocateRawMemory(requested, allocated);
     } else {
       mem = OS::Allocate(requested, allocated, true);
     }
(...skipping 84 matching lines...)
@@ -468,52 +469,53 @@
   initial_chunk_ = new VirtualMemory(requested);
   CHECK(initial_chunk_ != NULL);
   if (!initial_chunk_->IsReserved()) {
     delete initial_chunk_;
     initial_chunk_ = NULL;
     return NULL;
   }
 
   // We are sure that we have mapped a block of requested addresses.
   ASSERT(initial_chunk_->size() == requested);
-  LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
+  LOG(isolate_,
+      NewEvent("InitialChunk", initial_chunk_->address(), requested));
   size_ += static_cast<int>(requested);
   return initial_chunk_->address();
 }
 
 
 static int PagesInChunk(Address start, size_t size) {
   // The first page starts on the first page-aligned address from start onward
   // and the last page ends on the last page-aligned address before
   // start+size. Page::kPageSize is a power of two so we can divide by
   // shifting.
   return static_cast<int>((RoundDown(start + size, Page::kPageSize)
       - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
 }
 
 
 Page* MemoryAllocator::AllocatePages(int requested_pages,
                                      int* allocated_pages,
                                      PagedSpace* owner) {
   if (requested_pages <= 0) return Page::FromAddress(NULL);
   size_t chunk_size = requested_pages * Page::kPageSize;
 
   void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
   if (chunk == NULL) return Page::FromAddress(NULL);
-  LOG(NewEvent("PagedChunk", chunk, chunk_size));
+  LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
 
   *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
   // We may 'lose' a page due to alignment.
   ASSERT(*allocated_pages >= kPagesPerChunk - 1);
   if (*allocated_pages == 0) {
     FreeRawMemory(chunk, chunk_size, owner->executable());
-    LOG(DeleteEvent("PagedChunk", chunk));
+    LOG(isolate_, DeleteEvent("PagedChunk", chunk));
     return Page::FromAddress(NULL);
   }
 
   int chunk_id = Pop();
   chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
 
   ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
   PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
   Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
 
(...skipping 148 matching lines...)
@@ -668,21 +670,21 @@
 
   // We cannot free a chunk contained in the initial chunk because it was not
   // allocated with AllocateRawMemory. Instead we uncommit the virtual
   // memory.
   if (InInitialChunk(c.address())) {
     // TODO(1240712): VirtualMemory::Uncommit has a return value which
     // is ignored here.
     initial_chunk_->Uncommit(c.address(), c.size());
     COUNTERS->memory_allocated()->Decrement(static_cast<int>(c.size()));
   } else {
-    LOG(DeleteEvent("PagedChunk", c.address()));
+    LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
     ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
     size_t size = c.size();
     FreeRawMemory(c.address(), size, c.executable());
     PerformAllocationCallback(space, kAllocationActionFree, size);
   }
   c.init(NULL, 0, NULL);
   Push(chunk_id);
 }
 
 
(...skipping 963 matching lines...)
@@ -1652,70 +1654,74 @@
 // flag is set.
 void NewSpace::CollectStatistics() {
   ClearHistograms();
   SemiSpaceIterator it(this);
   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
     RecordAllocation(obj);
 }
 
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
-static void DoReportStatistics(HistogramInfo* info, const char* description) {
-  LOG(HeapSampleBeginEvent("NewSpace", description));
+static void DoReportStatistics(Isolate* isolate,
+                               HistogramInfo* info, const char* description) {
+  LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
   // Lump all the string types together.
   int string_number = 0;
   int string_bytes = 0;
 #define INCREMENT(type, size, name, camel_name) \
   string_number += info[type].number(); \
   string_bytes += info[type].bytes();
   STRING_TYPE_LIST(INCREMENT)
 #undef INCREMENT
   if (string_number > 0) {
-    LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
+    LOG(isolate,
+        HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
   }
 
   // Then do the other types.
   for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
     if (info[i].number() > 0) {
-      LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
-                              info[i].bytes()));
+      LOG(isolate,
+          HeapSampleItemEvent(info[i].name(), info[i].number(),
+                              info[i].bytes()));
     }
   }
-  LOG(HeapSampleEndEvent("NewSpace", description));
+  LOG(isolate, HeapSampleEndEvent("NewSpace", description));
 }
 #endif  // ENABLE_LOGGING_AND_PROFILING
 
 
 void NewSpace::ReportStatistics() {
 #ifdef DEBUG
   if (FLAG_heap_stats) {
     float pct = static_cast<float>(Available()) / Capacity();
     PrintF("  capacity: %" V8_PTR_PREFIX "d"
                ", available: %" V8_PTR_PREFIX "d, %%%d\n",
            Capacity(), Available(), static_cast<int>(pct*100));
     PrintF("\n  Object Histogram:\n");
     for (int i = 0; i <= LAST_TYPE; i++) {
       if (allocated_histogram_[i].number() > 0) {
         PrintF("    %-34s%10d (%10d bytes)\n",
                allocated_histogram_[i].name(),
                allocated_histogram_[i].number(),
                allocated_histogram_[i].bytes());
       }
     }
     PrintF("\n");
   }
 #endif  // DEBUG
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log_gc) {
-    DoReportStatistics(allocated_histogram_, "allocated");
-    DoReportStatistics(promoted_histogram_, "promoted");
+    Isolate* isolate = ISOLATE;
+    DoReportStatistics(isolate, allocated_histogram_, "allocated");
+    DoReportStatistics(isolate, promoted_histogram_, "promoted");
   }
 #endif  // ENABLE_LOGGING_AND_PROFILING
 }
 
 
 void NewSpace::RecordAllocation(HeapObject* obj) {
   InstanceType type = obj->map()->instance_type();
   ASSERT(0 <= type && type <= LAST_TYPE);
   allocated_histogram_[type].increment_number(1);
   allocated_histogram_[type].increment_bytes(obj->Size());
(...skipping 987 matching lines...)
@@ -2709,40 +2715,41 @@
 }
 
 
 // -----------------------------------------------------------------------------
 // LargeObjectChunk
 
 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
                                         Executability executable) {
   size_t requested = ChunkSizeFor(size_in_bytes);
   size_t size;
-  void* mem = Isolate::Current()->memory_allocator()->AllocateRawMemory(
+  Isolate* isolate = Isolate::Current();
+  void* mem = isolate->memory_allocator()->AllocateRawMemory(
       requested, &size, executable);
   if (mem == NULL) return NULL;
 
   // The start of the chunk may be overlayed with a page so we have to
   // make sure that the page flags fit in the size field.
   ASSERT((size & Page::kPageFlagMask) == 0);
 
-  LOG(NewEvent("LargeObjectChunk", mem, size));
+  LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
   if (size < requested) {
-    Isolate::Current()->memory_allocator()->FreeRawMemory(
-        mem, size, executable);
-    LOG(DeleteEvent("LargeObjectChunk", mem));
+    isolate->memory_allocator()->FreeRawMemory(
+        mem, size, executable);
+    LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
     return NULL;
   }
 
   ObjectSpace space = (executable == EXECUTABLE)
       ? kObjectSpaceCodeSpace
       : kObjectSpaceLoSpace;
-  Isolate::Current()->memory_allocator()->PerformAllocationCallback(
+  isolate->memory_allocator()->PerformAllocationCallback(
       space, kAllocationActionAllocate, size);
 
   LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
   chunk->size_ = size;
   Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
   page->heap_ = Isolate::Current()->heap();
   return chunk;
 }
 
 
(...skipping 22 matching lines...)
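
The LargeObjectChunk::New hunk above also hoists Isolate::Current() into a local instead of calling it before each use. As a rough, purely illustrative example of why that helps (the types below are hypothetical stand-ins, not V8 code): every Isolate::Current() call is a thread-local lookup, so fetching it once and reusing the pointer removes the repeated TLS reads this issue is about.

// Hypothetical micro-example of hoisting a TLS-backed accessor out of a hot path.
#include <cstdio>

struct Isolate {
  static Isolate* Current() {             // stand-in for a TLS-backed accessor
    static thread_local Isolate current;
    return &current;
  }
  int allocations = 0;
};

void repeated_fetch() {                   // one TLS lookup per statement
  Isolate::Current()->allocations++;
  Isolate::Current()->allocations++;
  Isolate::Current()->allocations++;
}

void hoisted_fetch() {                    // the pattern used in the hunk above
  Isolate* isolate = Isolate::Current();  // fetch once
  isolate->allocations += 3;              // reuse the local afterwards
}

int main() {
  repeated_fetch();
  hoisted_fetch();
  std::printf("%d\n", Isolate::Current()->allocations);  // prints 6
  return 0;
}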
@@ -2771,21 +2778,21 @@
   page_count_ = 0;
   objects_size_ = 0;
   return true;
 }
 
 
 void LargeObjectSpace::TearDown() {
   while (first_chunk_ != NULL) {
     LargeObjectChunk* chunk = first_chunk_;
     first_chunk_ = first_chunk_->next();
-    LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
+    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk->address()));
     Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
     Executability executable =
         page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
     ObjectSpace space = kObjectSpaceLoSpace;
     if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
     size_t size = chunk->size();
     heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(),
                                                          size,
                                                          executable);
     heap()->isolate()->memory_allocator()->PerformAllocationCallback(
(...skipping 215 matching lines...)
@@ -3007,21 +3014,21 @@
       size_ -= static_cast<int>(chunk_size);
       objects_size_ -= object->Size();
       page_count_--;
       ObjectSpace space = kObjectSpaceLoSpace;
       if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
       heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address,
                                                            chunk_size,
                                                            executable);
       heap()->isolate()->memory_allocator()->PerformAllocationCallback(
           space, kAllocationActionFree, size_);
-      LOG(DeleteEvent("LargeObjectChunk", chunk_address));
+      LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address));
     }
   }
 }
 
 
 bool LargeObjectSpace::Contains(HeapObject* object) {
   Address address = object->address();
   if (heap()->new_space()->Contains(address)) {
     return false;
   }
(...skipping 96 matching lines...)
@@ -3124,10 +3131,10 @@
   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       isolate->code_kind_statistics()[code->kind()] += code->Size();
     }
   }
 }
 #endif  // DEBUG
 
 } }  // namespace v8::internal
