Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(255)

Side by Side Diff: src/spaces.cc

Issue 7744025: Centralize code for freeing LargeObjectChunks, fixing an uncommit bug. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 9 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 2710 matching lines...) Expand 10 before | Expand all | Expand 10 after
2721 space, kAllocationActionAllocate, size); 2721 space, kAllocationActionAllocate, size);
2722 2722
2723 LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem); 2723 LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
2724 chunk->size_ = size; 2724 chunk->size_ = size;
2725 chunk->GetPage()->heap_ = isolate->heap(); 2725 chunk->GetPage()->heap_ = isolate->heap();
2726 return chunk; 2726 return chunk;
2727 } 2727 }
2728 2728
2729 2729
2730 void LargeObjectChunk::Free(Executability executable) { 2730 void LargeObjectChunk::Free(Executability executable) {
2731 size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
2732 ObjectSpace space =
2733 (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace;
2734 // Do not access instance fields after FreeRawMemory!
2735 Address my_address = address();
2736 size_t my_size = size();
2731 Isolate* isolate = GetPage()->heap_->isolate(); 2737 Isolate* isolate = GetPage()->heap_->isolate();
2732 isolate->memory_allocator()->FreeRawMemory(address(), size(), executable); 2738 MemoryAllocator* a = isolate->memory_allocator();
2739 a->FreeRawMemory(my_address - guard_size, my_size + guard_size, executable);
2740 a->PerformAllocationCallback(space, kAllocationActionFree, my_size);
2741 LOG(isolate, DeleteEvent("LargeObjectChunk", my_address));
2733 } 2742 }
2734 2743
2735 2744
2736 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) { 2745 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
2737 int os_alignment = static_cast<int>(OS::AllocateAlignment()); 2746 int os_alignment = static_cast<int>(OS::AllocateAlignment());
2738 if (os_alignment < Page::kPageSize) { 2747 if (os_alignment < Page::kPageSize) {
2739 size_in_bytes += (Page::kPageSize - os_alignment); 2748 size_in_bytes += (Page::kPageSize - os_alignment);
2740 } 2749 }
2741 return size_in_bytes + Page::kObjectStartOffset; 2750 return size_in_bytes + Page::kObjectStartOffset;
2742 } 2751 }
(...skipping 15 matching lines...) Expand all
2758 page_count_ = 0; 2767 page_count_ = 0;
2759 objects_size_ = 0; 2768 objects_size_ = 0;
2760 return true; 2769 return true;
2761 } 2770 }
2762 2771
2763 2772
2764 void LargeObjectSpace::TearDown() { 2773 void LargeObjectSpace::TearDown() {
2765 while (first_chunk_ != NULL) { 2774 while (first_chunk_ != NULL) {
2766 LargeObjectChunk* chunk = first_chunk_; 2775 LargeObjectChunk* chunk = first_chunk_;
2767 first_chunk_ = first_chunk_->next(); 2776 first_chunk_ = first_chunk_->next();
2768 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk->address())); 2777 chunk->Free(chunk->GetPage()->PageExecutability());
2769 Executability executable = chunk->GetPage()->PageExecutability();
2770 ObjectSpace space = kObjectSpaceLoSpace;
2771 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
2772 size_t size = chunk->size();
2773 size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
2774 heap()->isolate()->memory_allocator()->FreeRawMemory(
2775 chunk->address() - guard_size,
2776 size + guard_size,
2777 executable);
2778 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2779 space, kAllocationActionFree, size);
2780 } 2778 }
2781 2779 Setup();
Vyacheslav Egorov (Chromium) 2011/08/25 15:22:54 Calling Setup from TearDown is very confusing.
2782 size_ = 0;
2783 page_count_ = 0;
2784 objects_size_ = 0;
2785 } 2780 }
2786 2781
2787 2782
2788 MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size, 2783 MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
2789 int object_size, 2784 int object_size,
2790 Executability executable) { 2785 Executability executable) {
2791 ASSERT(0 < object_size && object_size <= requested_size); 2786 ASSERT(0 < object_size && object_size <= requested_size);
2792 2787
2793 // Check if we want to force a GC before growing the old space further. 2788 // Check if we want to force a GC before growing the old space further.
2794 // If so, fail the allocation. 2789 // If so, fail the allocation.
(...skipping 145 matching lines...) Expand 10 before | Expand all | Expand 10 after
2940 LargeObjectChunk* previous = NULL; 2935 LargeObjectChunk* previous = NULL;
2941 LargeObjectChunk* current = first_chunk_; 2936 LargeObjectChunk* current = first_chunk_;
2942 while (current != NULL) { 2937 while (current != NULL) {
2943 HeapObject* object = current->GetObject(); 2938 HeapObject* object = current->GetObject();
2944 if (object->IsMarked()) { 2939 if (object->IsMarked()) {
2945 object->ClearMark(); 2940 object->ClearMark();
2946 heap()->mark_compact_collector()->tracer()->decrement_marked_count(); 2941 heap()->mark_compact_collector()->tracer()->decrement_marked_count();
2947 previous = current; 2942 previous = current;
2948 current = current->next(); 2943 current = current->next();
2949 } else { 2944 } else {
2950 Executability executable = current->GetPage()->PageExecutability();
2951 Address chunk_address = current->address();
2952 size_t chunk_size = current->size();
2953
2954 // Cut the chunk out from the chunk list. 2945 // Cut the chunk out from the chunk list.
2946 LargeObjectChunk* current_chunk = current;
2955 current = current->next(); 2947 current = current->next();
2956 if (previous == NULL) { 2948 if (previous == NULL) {
2957 first_chunk_ = current; 2949 first_chunk_ = current;
2958 } else { 2950 } else {
2959 previous->set_next(current); 2951 previous->set_next(current);
2960 } 2952 }
2961 2953
2962 // Free the chunk. 2954 // Free the chunk.
2963 heap()->mark_compact_collector()->ReportDeleteIfNeeded( 2955 heap()->mark_compact_collector()->ReportDeleteIfNeeded(
2964 object, heap()->isolate()); 2956 object, heap()->isolate());
2965 LiveObjectList::ProcessNonLive(object); 2957 LiveObjectList::ProcessNonLive(object);
2966 2958
2967 size_ -= static_cast<int>(chunk_size); 2959 size_ -= static_cast<int>(current_chunk->size());
2968 objects_size_ -= object->Size(); 2960 objects_size_ -= object->Size();
2969 page_count_--; 2961 page_count_--;
2970 ObjectSpace space = kObjectSpaceLoSpace; 2962 current_chunk->Free(current_chunk->GetPage()->PageExecutability());
2971 size_t guard_size = 0;
2972 if (executable == EXECUTABLE) {
2973 space = kObjectSpaceCodeSpace;
2974 guard_size = Page::kPageSize;
2975 }
2976 heap()->isolate()->memory_allocator()->FreeRawMemory(
2977 chunk_address - guard_size,
2978 chunk_size + guard_size,
2979 executable);
2980 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2981 space, kAllocationActionFree, size_);
2982 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address));
2983 } 2963 }
2984 } 2964 }
2985 } 2965 }
2986 2966
2987 2967
2988 bool LargeObjectSpace::Contains(HeapObject* object) { 2968 bool LargeObjectSpace::Contains(HeapObject* object) {
2989 Address address = object->address(); 2969 Address address = object->address();
2990 if (heap()->new_space()->Contains(address)) { 2970 if (heap()->new_space()->Contains(address)) {
2991 return false; 2971 return false;
2992 } 2972 }
(...skipping 96 matching lines...) Expand 10 before | Expand all | Expand 10 after
3089 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { 3069 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
3090 if (obj->IsCode()) { 3070 if (obj->IsCode()) {
3091 Code* code = Code::cast(obj); 3071 Code* code = Code::cast(obj);
3092 isolate->code_kind_statistics()[code->kind()] += code->Size(); 3072 isolate->code_kind_statistics()[code->kind()] += code->Size();
3093 } 3073 }
3094 } 3074 }
3095 } 3075 }
3096 #endif // DEBUG 3076 #endif // DEBUG
3097 3077
3098 } } // namespace v8::internal 3078 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698