Chromium Code Reviews

Unified Diff: src/spaces.cc

Issue 6529055: [Isolates] Merge crankshaft (r5922 from bleeding_edge). (Closed)
Patch Set: Win32 port (created 9 years, 10 months ago)
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1,10 +1,10 @@
 // Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 472 matching lines...)
@@ -483,10 +483,10 @@
 static int PagesInChunk(Address start, size_t size) {
   // The first page starts on the first page-aligned address from start onward
   // and the last page ends on the last page-aligned address before
   // start+size.  Page::kPageSize is a power of two so we can divide by
   // shifting.
   return static_cast<int>((RoundDown(start + size, Page::kPageSize)
       - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
 }
 
 
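Note: PagesInChunk relies on Page::kPageSize being 1 << kPageSizeBits, so the byte distance between the first page-aligned address at or after start and the last one at or before start+size can be divided by shifting. A self-contained sketch of the same arithmetic; the constants and helpers here are hypothetical stand-ins, not V8's definitions:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for V8's constants and helpers.
const int kPageSizeBits = 13;  // hypothetical: any power-of-two page size
const uintptr_t kPageSize = uintptr_t(1) << kPageSizeBits;

uintptr_t RoundDown(uintptr_t x, uintptr_t align) { return x & ~(align - 1); }
uintptr_t RoundUp(uintptr_t x, uintptr_t align) {
  return RoundDown(x + align - 1, align);
}

// Whole pages contained in [start, start + size): divide by shifting,
// exactly as PagesInChunk does.
int PagesInChunk(uintptr_t start, uintptr_t size) {
  return static_cast<int>(
      (RoundDown(start + size, kPageSize) - RoundUp(start, kPageSize))
      >> kPageSizeBits);
}

int main() {
  // A page-aligned chunk of 16 pages holds exactly 16 pages...
  printf("%d\n", PagesInChunk(16 * kPageSize, 16 * kPageSize));      // 16
  // ...but starting one byte past alignment 'loses' a page, which is
  // why AllocatePages below tolerates kPagesPerChunk - 1.
  printf("%d\n", PagesInChunk(16 * kPageSize + 1, 16 * kPageSize));  // 15
  return 0;
}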
@@ -493,32 +493,28 @@
-Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
+Page* MemoryAllocator::AllocatePages(int requested_pages,
+                                     int* allocated_pages,
                                      PagedSpace* owner) {
   if (requested_pages <= 0) return Page::FromAddress(NULL);
   size_t chunk_size = requested_pages * Page::kPageSize;
 
-  // There is not enough space to guarantee that the desired number of pages
-  // can be allocated.
-  if (size_ + static_cast<int>(chunk_size) > capacity_) {
-    // Request as many pages as we can.
-    chunk_size = capacity_ - size_;
-    requested_pages = static_cast<int>(chunk_size >> kPageSizeBits);
-
-    if (requested_pages <= 0) return Page::FromAddress(NULL);
-  }
   void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
   if (chunk == NULL) return Page::FromAddress(NULL);
   LOG(NewEvent("PagedChunk", chunk, chunk_size));
 
   *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
+  // We may 'lose' a page due to alignment.
+  ASSERT(*allocated_pages >= kPagesPerChunk - 1);
   if (*allocated_pages == 0) {
     FreeRawMemory(chunk, chunk_size, owner->executable());
     LOG(DeleteEvent("PagedChunk", chunk));
     return Page::FromAddress(NULL);
   }
 
   int chunk_id = Pop();
   chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
 
   ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
   PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
-  return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
+  Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
+
+  return new_pages;
 }
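Note: the deleted block used to clamp the request to the allocator's remaining capacity; after this change AllocatePages always requests a full chunk and instead asserts that at most one page of it is lost to alignment. The failure path is also worth a second look: once AllocateRawMemory succeeds, every early return has to free the raw memory first. A minimal sketch of that shape, with hypothetical stand-ins for the allocator's primitives:

#include <cstdint>
#include <cstdlib>

const size_t kPageSize = 4096;  // hypothetical page size for this sketch

// Hypothetical stand-ins for MemoryAllocator's primitives.
void* AllocateRawMemory(size_t requested, size_t* allocated) {
  *allocated = requested;
  return std::malloc(requested);
}
void FreeRawMemory(void* mem, size_t /* size */) { std::free(mem); }

// Whole pages that fit in [chunk, chunk + size).
int PagesIn(void* chunk, size_t size) {
  uintptr_t start = reinterpret_cast<uintptr_t>(chunk);
  uintptr_t first = (start + kPageSize - 1) & ~(kPageSize - 1);
  uintptr_t last = (start + size) & ~(kPageSize - 1);
  return last > first ? static_cast<int>((last - first) / kPageSize) : 0;
}

// The shape of AllocatePages' error handling: once the raw allocation
// succeeds, every failure path must free it before bailing out.
void* AllocateChunkOrNull(size_t chunk_size, int* pages) {
  size_t actual = 0;
  void* chunk = AllocateRawMemory(chunk_size, &actual);
  if (chunk == NULL) return NULL;  // OS refused; nothing to undo.
  *pages = PagesIn(chunk, actual);
  if (*pages == 0) {               // Alignment ate the whole chunk:
    FreeRawMemory(chunk, actual);  // release before reporting failure.
    return NULL;
  }
  return chunk;
}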
@@ -525,9 +521,9 @@
 
 
 Page* MemoryAllocator::CommitPages(Address start, size_t size,
                                    PagedSpace* owner, int* num_pages) {
   ASSERT(start != NULL);
   *num_pages = PagesInChunk(start, size);
   ASSERT(*num_pages > 0);
   ASSERT(initial_chunk_ != NULL);
   ASSERT(InInitialChunk(start));
(...skipping 476 matching lines...)
@@ -1010,21 +1006,24 @@
   ASSERT(Capacity() % Page::kObjectAreaSize == 0);
 
   if (Capacity() == max_capacity_) return false;
 
   ASSERT(Capacity() < max_capacity_);
   // Last page must be valid and its next page is invalid.
   ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
 
   int available_pages =
       static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
-  if (available_pages <= 0) return false;
+  // We don't want to have to handle small chunks near the end, so if there
+  // are not kPagesPerChunk pages available without exceeding the max
+  // capacity then act as if memory has run out.
+  if (available_pages < MemoryAllocator::kPagesPerChunk) return false;
 
   int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
   Page* p = heap()->isolate()->memory_allocator()->AllocatePages(
       desired_pages, &desired_pages, this);
   if (!p->is_valid()) return false;
 
   accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
   ASSERT(Capacity() <= max_capacity_);
 
   heap()->isolate()->memory_allocator()->SetNextPage(last_page, p);
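Note: the new guard changes when a paged space refuses to grow. Previously any positive number of available pages allowed expansion; now a full chunk of MemoryAllocator::kPagesPerChunk pages must fit under max_capacity_, so the allocator never has to manage a runt chunk near the cap (and the Min() below always yields a full chunk). A sketch of the rule with hypothetical constants:

// Hypothetical constants; the real values live in MemoryAllocator and Page.
const int kPagesPerChunk = 16;
const int kObjectAreaSize = 8 * 1024;

// The growth rule Expand now follows: grow only when a whole chunk still
// fits under the hard cap, never a smaller remainder.
bool CanExpand(int capacity, int max_capacity) {
  int available_pages = (max_capacity - capacity) / kObjectAreaSize;
  // Old rule: available_pages > 0 was enough, permitting runt chunks.
  // New rule: act as if memory has run out unless a full chunk fits.
  return available_pages >= kPagesPerChunk;
}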
(...skipping 518 matching lines...)
@@ -1549,29 +1548,32 @@
   Isolate* isolate = Isolate::Current();
   const char* table[Code::NUMBER_OF_KINDS] = { NULL };
 
 #define CASE(name)                            \
   case Code::name: table[Code::name] = #name; \
   break
 
   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
     switch (static_cast<Code::Kind>(i)) {
       CASE(FUNCTION);
+      CASE(OPTIMIZED_FUNCTION);
       CASE(STUB);
       CASE(BUILTIN);
       CASE(LOAD_IC);
       CASE(KEYED_LOAD_IC);
       CASE(STORE_IC);
       CASE(KEYED_STORE_IC);
       CASE(CALL_IC);
       CASE(KEYED_CALL_IC);
       CASE(BINARY_OP_IC);
+      CASE(TYPE_RECORDING_BINARY_OP_IC);
+      CASE(COMPARE_IC);
     }
   }
 
 #undef CASE
 
   PrintF("\n   Code kind histograms: \n");
   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
     if (isolate->code_kind_statistics()[i] > 0) {
       PrintF("     %-20s: %10d bytes\n", table[i],
              isolate->code_kind_statistics()[i]);
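Note: the three new CASE lines keep this switch in step with the Code::Kind enum after crankshaft added OPTIMIZED_FUNCTION, TYPE_RECORDING_BINARY_OP_IC, and COMPARE_IC. The macro trick is compact enough to show in isolation: a switch with no default fills a name table keyed by enum value, and compilers that warn on unhandled enumerators will point at any kind that is forgotten. A reduced, self-contained sketch; the enum here is illustrative, not Code::Kind:

#include <cstddef>
#include <cstdio>

// Illustrative enum standing in for Code::Kind.
enum Kind { FUNCTION, OPTIMIZED_FUNCTION, STUB, NUMBER_OF_KINDS };

int main() {
  const char* table[NUMBER_OF_KINDS] = { NULL };

  // A switch with no default fills the table; compilers that warn on
  // unhandled enumerators flag any kind missing from the list.
#define CASE(name) \
  case name: table[name] = #name; \
  break

  for (int i = 0; i < NUMBER_OF_KINDS; i++) {
    switch (static_cast<Kind>(i)) {
      CASE(FUNCTION);
      CASE(OPTIMIZED_FUNCTION);
      CASE(STUB);
    }
  }
#undef CASE

  for (int i = 0; i < NUMBER_OF_KINDS; i++) {
    printf("%s\n", table[i]);
  }
  return 0;
}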
(...skipping 1123 matching lines...)
@@ -2701,29 +2703,41 @@
   HeapObject* object = current_->GetObject();
   current_ = current_->next();
   return object;
 }
 
 
 // -----------------------------------------------------------------------------
 // LargeObjectChunk
 
 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
-                                        size_t* chunk_size,
                                         Executability executable) {
   size_t requested = ChunkSizeFor(size_in_bytes);
+  size_t size;
   void* mem = Isolate::Current()->memory_allocator()->AllocateRawMemory(
-      requested, chunk_size, executable);
+      requested, &size, executable);
   if (mem == NULL) return NULL;
-  LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
-  if (*chunk_size < requested) {
+
+  // The start of the chunk may be overlaid with a page so we have to
+  // make sure that the page flags fit in the size field.
+  ASSERT((size & Page::kPageFlagMask) == 0);
+
+  LOG(NewEvent("LargeObjectChunk", mem, size));
+  if (size < requested) {
     Isolate::Current()->memory_allocator()->FreeRawMemory(
-        mem, *chunk_size, executable);
+        mem, size, executable);
     LOG(DeleteEvent("LargeObjectChunk", mem));
     return NULL;
   }
-  ObjectSpace space =
-      (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace;
+
+  ObjectSpace space = (executable == EXECUTABLE)
+      ? kObjectSpaceCodeSpace
+      : kObjectSpaceLoSpace;
   Isolate::Current()->memory_allocator()->PerformAllocationCallback(
-      space, kAllocationActionAllocate, *chunk_size);
-  return reinterpret_cast<LargeObjectChunk*>(mem);
+      space, kAllocationActionAllocate, size);
+
+  LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
+  chunk->size_ = size;
+  Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
+  page->heap_ = Isolate::Current()->heap();
+  return chunk;
 }
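Note: the new ASSERT documents a subtle overlay. The first page of a large-object chunk sits on top of the chunk header, so the word holding the chunk's size is also read as a page word whose low bits are flag bits; the size must therefore keep those bits clear. A hypothetical sketch of that packing (kPageFlagMask and the layout are illustrative, not V8's actual Page constants):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Illustrative: suppose the low three bits of the overlaid word are flags.
const uintptr_t kPageFlagMask = (1 << 3) - 1;

struct ChunkHeader {
  // This word is the chunk's size, but the Page overlaid on the chunk
  // start reads its low bits as flags, so the size may not clobber them.
  uintptr_t size_and_flags;

  void set_size(size_t size) {
    assert((size & kPageFlagMask) == 0);  // mirrors the new ASSERT
    size_and_flags = (size_and_flags & kPageFlagMask) | size;
  }
  size_t size() const { return size_and_flags & ~kPageFlagMask; }
};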
@@ -2730,16 +2744,17 @@
 
 
 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
   int os_alignment = static_cast<int>(OS::AllocateAlignment());
-  if (os_alignment < Page::kPageSize)
+  if (os_alignment < Page::kPageSize) {
     size_in_bytes += (Page::kPageSize - os_alignment);
+  }
   return size_in_bytes + Page::kObjectStartOffset;
 }
 
 // -----------------------------------------------------------------------------
 // LargeObjectSpace
 
 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
       first_chunk_(NULL),
       size_(0),
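Note: ChunkSizeFor pads the request so that a page-aligned object area of the requested size is guaranteed to fit even in the worst case, where the OS returns a block aligned only to OS::AllocateAlignment() rather than to Page::kPageSize. A worked version with hypothetical sizes:

// Hypothetical sizes: the OS allocator aligns to 4 KB but pages are 8 KB,
// so a returned block can start up to 4 KB past a page boundary.
const int kOSAlignment = 4 * 1024;   // stand-in for OS::AllocateAlignment()
const int kPageSize = 8 * 1024;      // stand-in for Page::kPageSize
const int kObjectStartOffset = 256;  // stand-in for Page::kObjectStartOffset

int ChunkSizeFor(int size_in_bytes) {
  if (kOSAlignment < kPageSize) {
    // Worst case, the usable page-aligned region starts this far in.
    size_in_bytes += (kPageSize - kOSAlignment);
  }
  // Room for the page header in front of the object area.
  return size_in_bytes + kObjectStartOffset;
}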
(...skipping 64 matching lines...)
@@ -2810,36 +2825,33 @@
                                                    Executability executable) {
   ASSERT(0 < object_size && object_size <= requested_size);
 
   // Check if we want to force a GC before growing the old space further.
   // If so, fail the allocation.
   if (!heap()->always_allocate() &&
       heap()->OldGenerationAllocationLimitReached()) {
     return Failure::RetryAfterGC(identity());
   }
 
-  size_t chunk_size;
-  LargeObjectChunk* chunk =
-      LargeObjectChunk::New(requested_size, &chunk_size, executable);
+  LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable);
   if (chunk == NULL) {
     return Failure::RetryAfterGC(identity());
   }
 
-  size_ += static_cast<int>(chunk_size);
+  size_ += static_cast<int>(chunk->size());
   objects_size_ += requested_size;
   page_count_++;
   chunk->set_next(first_chunk_);
-  chunk->set_size(chunk_size);
   first_chunk_ = chunk;
 
   // Initialize page header.
   Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
   Address object_address = page->ObjectAreaStart();
+
   // Clear the low order bit of the second word in the page to flag it as a
   // large object page.  If the chunk_size happened to be written there, its
   // low order bit should already be clear.
-  ASSERT((chunk_size & 0x1) == 0);
   page->SetIsLargeObjectPage(true);
   page->SetIsPageExecutable(executable);
   page->SetRegionMarks(Page::kAllRegionsCleanMarks);
   return HeapObject::FromAddress(object_address);
 }
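Note: with LargeObjectChunk::New now recording the actual size in the chunk header itself (chunk->size_), the chunk_size out-parameter, the caller-side set_size() call, and the related ASSERT all disappear; AllocateRawInternal just reads the size back. A sketch of the before/after shape of that refactor (names are illustrative):

#include <cstddef>

// Before: callers threaded a size_t out-parameter through New() and then
// had to remember a separate set_size() call:
//
//   size_t chunk_size;
//   LargeObjectChunk* chunk = LargeObjectChunk::New(..., &chunk_size, ...);
//   chunk->set_size(chunk_size);
//
// After: New() records the size in the header once, and callers only read:
struct Chunk {
  size_t size_;                          // written once at creation
  size_t size() const { return size_; }  // e.g. size_ += chunk->size();
};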
@@ -2846,5 +2858,5 @@
 
 
 MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
   ASSERT(0 < size_in_bytes);
   return AllocateRawInternal(size_in_bytes,
(...skipping 256 matching lines...)
@@ -3107,10 +3119,10 @@
   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       isolate->code_kind_statistics()[code->kind()] += code->Size();
     }
   }
 }
 #endif  // DEBUG
 
 } }  // namespace v8::internal