Chromium Code Reviews

Side by Side Diff: src/heap.cc

Issue 24205004: Rollback trunk to 3.21.16.2 (Closed) Base URL: https://v8.googlecode.com/svn/trunk
Patch Set: Created 7 years, 3 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 111 matching lines...)
122 #endif // DEBUG 122 #endif // DEBUG
123 new_space_high_promotion_mode_active_(false), 123 new_space_high_promotion_mode_active_(false),
124 old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit), 124 old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
125 size_of_old_gen_at_last_old_space_gc_(0), 125 size_of_old_gen_at_last_old_space_gc_(0),
126 external_allocation_limit_(0), 126 external_allocation_limit_(0),
127 amount_of_external_allocated_memory_(0), 127 amount_of_external_allocated_memory_(0),
128 amount_of_external_allocated_memory_at_last_global_gc_(0), 128 amount_of_external_allocated_memory_at_last_global_gc_(0),
129 old_gen_exhausted_(false), 129 old_gen_exhausted_(false),
130 store_buffer_rebuilder_(store_buffer()), 130 store_buffer_rebuilder_(store_buffer()),
131 hidden_string_(NULL), 131 hidden_string_(NULL),
132 global_gc_prologue_callback_(NULL),
133 global_gc_epilogue_callback_(NULL),
132 gc_safe_size_of_old_object_(NULL), 134 gc_safe_size_of_old_object_(NULL),
133 total_regexp_code_generated_(0), 135 total_regexp_code_generated_(0),
134 tracer_(NULL), 136 tracer_(NULL),
135 young_survivors_after_last_gc_(0), 137 young_survivors_after_last_gc_(0),
136 high_survival_rate_period_length_(0), 138 high_survival_rate_period_length_(0),
137 low_survival_rate_period_length_(0), 139 low_survival_rate_period_length_(0),
138 survival_rate_(0), 140 survival_rate_(0),
139 previous_survival_rate_trend_(Heap::STABLE), 141 previous_survival_rate_trend_(Heap::STABLE),
140 survival_rate_trend_(Heap::STABLE), 142 survival_rate_trend_(Heap::STABLE),
141 max_gc_pause_(0.0), 143 max_gc_pause_(0.0),
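
Note: the two lines added on the right-hand side above restore the parameterless global GC hook fields that the trunk revision had removed. As a hedged sketch only, not part of this CL and with a made-up function name, a hook stored in global_gc_prologue_callback_ is simply a function taking no arguments, invoked around full mark-sweep collections:

    // Hypothetical embedder hook with the shape stored in
    // global_gc_prologue_callback_; it takes no arguments.
    static void OnFullGCPrologue() {
      // e.g. flush embedder-side caches before a mark-sweep-compact GC.
    }
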
(...skipping 904 matching lines...)
1046 if (FLAG_verify_heap) { 1048 if (FLAG_verify_heap) {
1047 VerifyStringTable(this); 1049 VerifyStringTable(this);
1048 } 1050 }
1049 #endif 1051 #endif
1050 1052
1051 return next_gc_likely_to_collect_more; 1053 return next_gc_likely_to_collect_more;
1052 } 1054 }
1053 1055
1054 1056
1055 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) { 1057 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1058 if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
1059 global_gc_prologue_callback_();
1060 }
1056 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { 1061 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1057 if (gc_type & gc_prologue_callbacks_[i].gc_type) { 1062 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1058 if (!gc_prologue_callbacks_[i].pass_isolate_) { 1063 gc_prologue_callbacks_[i].callback(gc_type, flags);
1059 v8::GCPrologueCallback callback =
1060 reinterpret_cast<v8::GCPrologueCallback>(
1061 gc_prologue_callbacks_[i].callback);
1062 callback(gc_type, flags);
1063 } else {
1064 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1065 gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1066 }
1067 } 1064 }
1068 } 1065 }
1069 } 1066 }
1070 1067
1071 1068
1072 void Heap::CallGCEpilogueCallbacks(GCType gc_type) { 1069 void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
1073 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) { 1070 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1074 if (gc_type & gc_epilogue_callbacks_[i].gc_type) { 1071 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1075 if (!gc_epilogue_callbacks_[i].pass_isolate_) { 1072 gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
1076 v8::GCPrologueCallback callback =
1077 reinterpret_cast<v8::GCPrologueCallback>(
1078 gc_epilogue_callbacks_[i].callback);
1079 callback(gc_type, kNoGCCallbackFlags);
1080 } else {
1081 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1082 gc_epilogue_callbacks_[i].callback(
1083 isolate, gc_type, kNoGCCallbackFlags);
1084 }
1085 } 1073 }
1086 } 1074 }
1075 if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
1076 global_gc_epilogue_callback_();
1077 }
1087 } 1078 }
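
Note: the two hunks above change how CallGCPrologueCallbacks and CallGCEpilogueCallbacks dispatch each registered entry. The trunk side picked between a plain (GCType, GCCallbackFlags) callback and an isolate-passing variant based on pass_isolate_; the rolled-back side always calls the plain form and additionally fires the single global mark-sweep hook. A minimal sketch of the two callback shapes involved, with hypothetical embedder-side names:

    #include "v8.h"

    // Rolled-back shape: invoked as callback(gc_type, flags).
    static void OnGCPrologue(v8::GCType type, v8::GCCallbackFlags flags) {
      // e.g. record that a collection of the given type is starting.
    }

    // Trunk shape removed by this rollback: invoked as
    // callback(isolate, gc_type, flags) when pass_isolate_ was true.
    static void OnGCPrologueWithIsolate(v8::Isolate* isolate,
                                        v8::GCType type,
                                        v8::GCCallbackFlags flags) {
      // e.g. consult isolate-specific state before the collection runs.
    }
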
1088 1079
1089 1080
1090 void Heap::MarkCompact(GCTracer* tracer) { 1081 void Heap::MarkCompact(GCTracer* tracer) {
1091 gc_state_ = MARK_COMPACT; 1082 gc_state_ = MARK_COMPACT;
1092 LOG(isolate_, ResourceEvent("markcompact", "begin")); 1083 LOG(isolate_, ResourceEvent("markcompact", "begin"));
1093 1084
1094 mark_compact_collector_.Prepare(tracer); 1085 mark_compact_collector_.Prepare(tracer);
1095 1086
1096 ms_count_++; 1087 ms_count_++;
(...skipping 3215 matching lines...)
4312 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type()); 4303 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4313 int size = map->instance_size() + AllocationMemento::kSize; 4304 int size = map->instance_size() + AllocationMemento::kSize;
4314 Object* result; 4305 Object* result;
4315 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space); 4306 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4316 if (!maybe_result->ToObject(&result)) return maybe_result; 4307 if (!maybe_result->ToObject(&result)) return maybe_result;
4317 // No need for write barrier since object is white and map is in old space. 4308 // No need for write barrier since object is white and map is in old space.
4318 HeapObject::cast(result)->set_map_no_write_barrier(map); 4309 HeapObject::cast(result)->set_map_no_write_barrier(map);
4319 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>( 4310 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4320 reinterpret_cast<Address>(result) + map->instance_size()); 4311 reinterpret_cast<Address>(result) + map->instance_size());
4321 alloc_memento->set_map_no_write_barrier(allocation_memento_map()); 4312 alloc_memento->set_map_no_write_barrier(allocation_memento_map());
4322 ASSERT(allocation_site->map() == allocation_site_map());
4323 alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER); 4313 alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
4324 return result; 4314 return result;
4325 } 4315 }
4326 4316
4327 4317
4328 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) { 4318 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4329 ASSERT(gc_state_ == NOT_IN_GC); 4319 ASSERT(gc_state_ == NOT_IN_GC);
4330 ASSERT(map->instance_type() != MAP_TYPE); 4320 ASSERT(map->instance_type() != MAP_TYPE);
4331 // If allocation failures are disallowed, we may allocate in a different 4321 // If allocation failures are disallowed, we may allocate in a different
4332 // space when new space is full and the object is not a large object. 4322 // space when new space is full and the object is not a large object.
(...skipping 723 matching lines...)
5056 } 5046 }
5057 5047
5058 // Track allocation site information, if we failed to allocate it inline. 5048 // Track allocation site information, if we failed to allocate it inline.
5059 if (InNewSpace(clone) && 5049 if (InNewSpace(clone) &&
5060 adjusted_object_size == object_size) { 5050 adjusted_object_size == object_size) {
5061 MaybeObject* maybe_alloc_memento = 5051 MaybeObject* maybe_alloc_memento =
5062 AllocateStruct(ALLOCATION_MEMENTO_TYPE); 5052 AllocateStruct(ALLOCATION_MEMENTO_TYPE);
5063 AllocationMemento* alloc_memento; 5053 AllocationMemento* alloc_memento;
5064 if (maybe_alloc_memento->To(&alloc_memento)) { 5054 if (maybe_alloc_memento->To(&alloc_memento)) {
5065 alloc_memento->set_map_no_write_barrier(allocation_memento_map()); 5055 alloc_memento->set_map_no_write_barrier(allocation_memento_map());
5066 ASSERT(site->map() == allocation_site_map());
5067 alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER); 5056 alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
5068 } 5057 }
5069 } 5058 }
5070 } else { 5059 } else {
5071 wb_mode = SKIP_WRITE_BARRIER; 5060 wb_mode = SKIP_WRITE_BARRIER;
5072 adjusted_object_size += AllocationMemento::kSize; 5061 adjusted_object_size += AllocationMemento::kSize;
5073 5062
5074 { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size); 5063 { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
5075 if (!maybe_clone->ToObject(&clone)) return maybe_clone; 5064 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
5076 } 5065 }
5077 SLOW_ASSERT(InNewSpace(clone)); 5066 SLOW_ASSERT(InNewSpace(clone));
5078 // Since we know the clone is allocated in new space, we can copy 5067 // Since we know the clone is allocated in new space, we can copy
5079 // the contents without worrying about updating the write barrier. 5068 // the contents without worrying about updating the write barrier.
5080 CopyBlock(HeapObject::cast(clone)->address(), 5069 CopyBlock(HeapObject::cast(clone)->address(),
5081 source->address(), 5070 source->address(),
5082 object_size); 5071 object_size);
5083 } 5072 }
5084 5073
5085 if (adjusted_object_size > object_size) { 5074 if (adjusted_object_size > object_size) {
5086 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>( 5075 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
5087 reinterpret_cast<Address>(clone) + object_size); 5076 reinterpret_cast<Address>(clone) + object_size);
5088 alloc_memento->set_map_no_write_barrier(allocation_memento_map()); 5077 alloc_memento->set_map_no_write_barrier(allocation_memento_map());
5089 ASSERT(site->map() == allocation_site_map());
5090 alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER); 5078 alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
5091 } 5079 }
5092 5080
5093 SLOW_ASSERT( 5081 SLOW_ASSERT(
5094 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind()); 5082 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
5095 FixedArrayBase* elements = FixedArrayBase::cast(source->elements()); 5083 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
5096 FixedArray* properties = FixedArray::cast(source->properties()); 5084 FixedArray* properties = FixedArray::cast(source->properties());
5097 // Update elements if necessary. 5085 // Update elements if necessary.
5098 if (elements->length() > 0) { 5086 if (elements->length() > 0) {
5099 Object* elem; 5087 Object* elem;
(...skipping 1973 matching lines...)
7073 7061
7074 store_buffer()->TearDown(); 7062 store_buffer()->TearDown();
7075 incremental_marking()->TearDown(); 7063 incremental_marking()->TearDown();
7076 7064
7077 isolate_->memory_allocator()->TearDown(); 7065 isolate_->memory_allocator()->TearDown();
7078 7066
7079 delete relocation_mutex_; 7067 delete relocation_mutex_;
7080 } 7068 }
7081 7069
7082 7070
7083 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback, 7071 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
7084 GCType gc_type,
7085 bool pass_isolate) {
7086 ASSERT(callback != NULL); 7072 ASSERT(callback != NULL);
7087 GCPrologueCallbackPair pair(callback, gc_type, pass_isolate); 7073 GCPrologueCallbackPair pair(callback, gc_type);
7088 ASSERT(!gc_prologue_callbacks_.Contains(pair)); 7074 ASSERT(!gc_prologue_callbacks_.Contains(pair));
7089 return gc_prologue_callbacks_.Add(pair); 7075 return gc_prologue_callbacks_.Add(pair);
7090 } 7076 }
7091 7077
7092 7078
7093 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) { 7079 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
7094 ASSERT(callback != NULL); 7080 ASSERT(callback != NULL);
7095 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { 7081 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
7096 if (gc_prologue_callbacks_[i].callback == callback) { 7082 if (gc_prologue_callbacks_[i].callback == callback) {
7097 gc_prologue_callbacks_.Remove(i); 7083 gc_prologue_callbacks_.Remove(i);
7098 return; 7084 return;
7099 } 7085 }
7100 } 7086 }
7101 UNREACHABLE(); 7087 UNREACHABLE();
7102 } 7088 }
7103 7089
7104 7090
7105 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback, 7091 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
7106 GCType gc_type,
7107 bool pass_isolate) {
7108 ASSERT(callback != NULL); 7092 ASSERT(callback != NULL);
7109 GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate); 7093 GCEpilogueCallbackPair pair(callback, gc_type);
7110 ASSERT(!gc_epilogue_callbacks_.Contains(pair)); 7094 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
7111 return gc_epilogue_callbacks_.Add(pair); 7095 return gc_epilogue_callbacks_.Add(pair);
7112 } 7096 }
7113 7097
7114 7098
7115 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) { 7099 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
7116 ASSERT(callback != NULL); 7100 ASSERT(callback != NULL);
7117 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) { 7101 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
7118 if (gc_epilogue_callbacks_[i].callback == callback) { 7102 if (gc_epilogue_callbacks_[i].callback == callback) {
7119 gc_epilogue_callbacks_.Remove(i); 7103 gc_epilogue_callbacks_.Remove(i);
7120 return; 7104 return;
7121 } 7105 }
7122 } 7106 }
7123 UNREACHABLE(); 7107 UNREACHABLE();
7124 } 7108 }
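
Note: in the four registration functions above, the rollback drops the pass_isolate parameter, so Heap::AddGCPrologueCallback and Heap::AddGCEpilogueCallback again take just a callback and a GCType filter. An illustrative sketch only: the helper below is hypothetical, and real embedders register through the public v8 API rather than calling Heap directly:

    #include "v8.h"
    #include "heap.h"  // assumed internal include for v8::internal::Heap

    // Hypothetical helper showing the rolled-back internal signatures:
    // (callback, gc_type) with no pass_isolate flag.
    void RegisterGCHooks(v8::internal::Heap* heap,
                         v8::GCPrologueCallback prologue,
                         v8::GCEpilogueCallback epilogue) {
      heap->AddGCPrologueCallback(prologue, v8::kGCTypeAll);
      heap->AddGCEpilogueCallback(epilogue, v8::kGCTypeAll);
    }
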
7125 7109
(...skipping 991 matching lines...)
8117 if (FLAG_concurrent_recompilation) { 8101 if (FLAG_concurrent_recompilation) {
8118 heap_->relocation_mutex_->Lock(); 8102 heap_->relocation_mutex_->Lock();
8119 #ifdef DEBUG 8103 #ifdef DEBUG
8120 heap_->relocation_mutex_locked_by_optimizer_thread_ = 8104 heap_->relocation_mutex_locked_by_optimizer_thread_ =
8121 heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread(); 8105 heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
8122 #endif // DEBUG 8106 #endif // DEBUG
8123 } 8107 }
8124 } 8108 }
8125 8109
8126 } } // namespace v8::internal 8110 } } // namespace v8::internal