Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 19595004: Rename AllocationSiteInfo to AllocationMemento (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 5 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 4225 matching lines...)


 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
     Handle<AllocationSite> allocation_site) {
   ASSERT(gc_state_ == NOT_IN_GC);
   ASSERT(map->instance_type() != MAP_TYPE);
   // If allocation failures are disallowed, we may allocate in a different
   // space when new space is full and the object is not a large object.
   AllocationSpace retry_space =
       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
-  int size = map->instance_size() + AllocationSiteInfo::kSize;
+  int size = map->instance_size() + AllocationMemento::kSize;
   Object* result;
   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
   if (!maybe_result->ToObject(&result)) return maybe_result;
   // No need for write barrier since object is white and map is in old space.
   HeapObject::cast(result)->set_map_no_write_barrier(map);
-  AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
+  AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
       reinterpret_cast<Address>(result) + map->instance_size());
-  alloc_info->set_map_no_write_barrier(allocation_site_info_map());
-  alloc_info->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
+  alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+  alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
   return result;
 }
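(Note on the hunk above: the renamed AllocationMemento is written directly behind the object it describes. The heap reserves map->instance_size() + AllocationMemento::kSize bytes in one allocation and then finds the memento by plain pointer arithmetic. The standalone C++ sketch below models that trailing-memento layout with made-up stand-in types; none of these names are real V8 API.)

#include <cstdio>
#include <cstdlib>
#include <cstdint>

// Stand-ins for the real V8 types; sizes and fields are illustrative only.
struct FakeJSObject { uint8_t fields[32]; };     // plays the role of the allocated object
struct FakeMemento  { void* allocation_site; };  // plays the role of AllocationMemento

int main() {
  // Reserve one block for the object plus its trailing memento, mirroring
  // "int size = map->instance_size() + AllocationMemento::kSize;".
  size_t size = sizeof(FakeJSObject) + sizeof(FakeMemento);
  uint8_t* block = static_cast<uint8_t*>(std::malloc(size));
  if (block == NULL) return 1;

  FakeJSObject* object = reinterpret_cast<FakeJSObject*>(block);
  // The memento lives immediately after the object and is located the same
  // way the patch does: object address + instance size.
  FakeMemento* memento = reinterpret_cast<FakeMemento*>(block + sizeof(FakeJSObject));
  memento->allocation_site = NULL;  // the real code stores *allocation_site here

  std::printf("object at %p, memento at %p\n",
              static_cast<void*>(object), static_cast<void*>(memento));
  std::free(block);
  return 0;
}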


 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
   ASSERT(gc_state_ == NOT_IN_GC);
   ASSERT(map->instance_type() != MAP_TYPE);
   // If allocation failures are disallowed, we may allocate in a different
   // space when new space is full and the object is not a large object.
   AllocationSpace retry_space =
(...skipping 691 matching lines...)
   ASSERT(map->CanTrackAllocationSite());
   ASSERT(map->instance_type() == JS_ARRAY_TYPE);
   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;

   // If we're forced to always allocate, we use the general allocation
   // functions which may leave us with an object in old space.
   int adjusted_object_size = object_size;
   if (always_allocate()) {
     // We'll only track origin if we are certain to allocate in new space
     const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
-    if ((object_size + AllocationSiteInfo::kSize) < kMinFreeNewSpaceAfterGC) {
-      adjusted_object_size += AllocationSiteInfo::kSize;
+    if ((object_size + AllocationMemento::kSize) < kMinFreeNewSpaceAfterGC) {
+      adjusted_object_size += AllocationMemento::kSize;
     }

     { MaybeObject* maybe_clone =
           AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
     }
     Address clone_address = HeapObject::cast(clone)->address();
     CopyBlock(clone_address,
               source->address(),
               object_size);
     // Update write barrier for all fields that lie beyond the header.
     int write_barrier_offset = adjusted_object_size > object_size
-        ? JSArray::kSize + AllocationSiteInfo::kSize
+        ? JSArray::kSize + AllocationMemento::kSize
         : JSObject::kHeaderSize;
     if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
       RecordWrites(clone_address,
                    write_barrier_offset,
                    (object_size - write_barrier_offset) / kPointerSize);
     }

     // Track allocation site information, if we failed to allocate it inline.
     if (InNewSpace(clone) &&
         adjusted_object_size == object_size) {
-      MaybeObject* maybe_alloc_info =
-          AllocateStruct(ALLOCATION_SITE_INFO_TYPE);
-      AllocationSiteInfo* alloc_info;
-      if (maybe_alloc_info->To(&alloc_info)) {
-        alloc_info->set_map_no_write_barrier(allocation_site_info_map());
-        alloc_info->set_allocation_site(site, SKIP_WRITE_BARRIER);
+      MaybeObject* maybe_alloc_memento =
+          AllocateStruct(ALLOCATION_MEMENTO_TYPE);
+      AllocationMemento* alloc_memento;
+      if (maybe_alloc_memento->To(&alloc_memento)) {
+        alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+        alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
       }
     }
   } else {
     wb_mode = SKIP_WRITE_BARRIER;
-    adjusted_object_size += AllocationSiteInfo::kSize;
+    adjusted_object_size += AllocationMemento::kSize;

     { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
     }
     SLOW_ASSERT(InNewSpace(clone));
     // Since we know the clone is allocated in new space, we can copy
     // the contents without worrying about updating the write barrier.
     CopyBlock(HeapObject::cast(clone)->address(),
               source->address(),
               object_size);
   }

   if (adjusted_object_size > object_size) {
-    AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
+    AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
         reinterpret_cast<Address>(clone) + object_size);
-    alloc_info->set_map_no_write_barrier(allocation_site_info_map());
-    alloc_info->set_allocation_site(site, SKIP_WRITE_BARRIER);
+    alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+    alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
   }

   SLOW_ASSERT(
       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
   FixedArray* properties = FixedArray::cast(source->properties());
   // Update elements if necessary.
   if (elements->length() > 0) {
     Object* elem;
     { MaybeObject* maybe_elem;
(...skipping 3139 matching lines...)
   if (FLAG_parallel_recompilation) {
     heap_->relocation_mutex_->Lock();
 #ifdef DEBUG
     heap_->relocation_mutex_locked_by_optimizer_thread_ =
         heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
 #endif  // DEBUG
   }
 }

 } }  // namespace v8::internal
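(A second note, on the clone path above: unlike the straight allocation path, the memento is appended inline only when the enlarged clone is certain to land in new space; with always_allocate() set that is the kMinFreeNewSpaceAfterGC check, and when it fails the memento is allocated out of line via AllocateStruct. The sketch below is a rough standalone model of that sizing decision; the constants and the helper name AdjustedCloneSize are inventions for illustration, not V8 code.)

#include <cstdio>

// Illustrative constants; the real values come from the Heap instance.
const int kMementoSize = 16;                   // stands in for AllocationMemento::kSize
const int kInitialSemiSpaceSize = 512 * 1024;  // stands in for InitialSemiSpaceSize()

// Models the sizing decision in the clone path: append the memento inline only
// when the enlarged clone is certain to fit in new space after a GC.
int AdjustedCloneSize(int object_size, bool always_allocate) {
  if (!always_allocate) {
    // Normal path: the clone is allocated in new space and the memento is
    // always appended inline.
    return object_size + kMementoSize;
  }
  const int kMinFreeNewSpaceAfterGC = kInitialSemiSpaceSize * 3 / 4;
  if (object_size + kMementoSize < kMinFreeNewSpaceAfterGC) {
    return object_size + kMementoSize;  // inline memento, as in the fast case
  }
  return object_size;  // memento gets allocated out of line afterwards
}

int main() {
  std::printf("small clone: %d bytes\n", AdjustedCloneSize(64, true));
  std::printf("large clone: %d bytes\n", AdjustedCloneSize(1024 * 1024, true));
  return 0;
}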