Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 153913002: A64: Synchronize with r16756. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/a64
Patch Set: Created 6 years, 10 months ago
@@ -1,10 +1,10 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 4297 matching lines...)
@@ -4308,20 +4308,21 @@
       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
   int size = map->instance_size() + AllocationMemento::kSize;
   Object* result;
   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
   if (!maybe_result->ToObject(&result)) return maybe_result;
   // No need for write barrier since object is white and map is in old space.
   HeapObject::cast(result)->set_map_no_write_barrier(map);
   AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
       reinterpret_cast<Address>(result) + map->instance_size());
   alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+  ASSERT(allocation_site->map() == allocation_site_map());
   alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
   return result;
 }


 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
   ASSERT(gc_state_ == NOT_IN_GC);
   ASSERT(map->instance_type() != MAP_TYPE);
   // If allocation failures are disallowed, we may allocate in a different
   // space when new space is full and the object is not a large object.
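The new ASSERT here (and twice more in the hunks below) guards a store that deliberately skips the GC write barrier: once SKIP_WRITE_BARRIER removes the usual safety net, nothing later verifies that the stored value really is an AllocationSite, so the patch checks its map up front. A minimal standalone sketch of the pattern, using hypothetical stand-in types rather than V8's real API:

#include <cassert>

// Hypothetical stand-ins for V8's Map / AllocationSite / AllocationMemento;
// only the shape of the pattern matches the patch, not the real types.
struct Map {};
static Map allocation_site_map;  // the one map every AllocationSite shares

struct HeapObject {
  Map* map;
};

struct AllocationSite : HeapObject {};

struct AllocationMemento : HeapObject {
  AllocationSite* allocation_site;

  // Raw store that skips the GC write barrier. This is only safe when the
  // memento is freshly allocated ("white" to the GC), so the caller asserts
  // a cheap invariant about the value before trusting the raw store.
  void set_allocation_site_skip_write_barrier(AllocationSite* site) {
    assert(site->map == &allocation_site_map);  // the check the patch adds
    allocation_site = site;
  }
};

int main() {
  AllocationSite site;
  site.map = &allocation_site_map;
  AllocationMemento memento;
  memento.set_allocation_site_skip_write_barrier(&site);
  return 0;
}

Comparing a single map pointer is cheap enough for a debug-only assertion, and it catches a mistyped site before the barrier-free store can hide it from later heap verification.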
(...skipping 41 matching lines...)
@@ -4369,21 +4370,21 @@
     MaybeObject* maybe_map = object_function->initial_map()->Copy();
     if (!maybe_map->To(&new_map)) return maybe_map;
   }

   Object* prototype;
   MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
   if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;

   if (!function->shared()->is_generator()) {
     MaybeObject* maybe_failure =
-        JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
+        JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributesTrampoline(
             constructor_string(), function, DONT_ENUM);
     if (maybe_failure->IsFailure()) return maybe_failure;
   }

   return prototype;
 }


 MaybeObject* Heap::AllocateFunction(Map* function_map,
                                     SharedFunctionInfo* shared,
(...skipping 661 matching lines...)
@@ -5051,42 +5052,44 @@
     }

     // Track allocation site information, if we failed to allocate it inline.
     if (InNewSpace(clone) &&
         adjusted_object_size == object_size) {
       MaybeObject* maybe_alloc_memento =
           AllocateStruct(ALLOCATION_MEMENTO_TYPE);
       AllocationMemento* alloc_memento;
       if (maybe_alloc_memento->To(&alloc_memento)) {
         alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+        ASSERT(site->map() == allocation_site_map());
         alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
       }
     }
   } else {
     wb_mode = SKIP_WRITE_BARRIER;
     adjusted_object_size += AllocationMemento::kSize;

     { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
     }
     SLOW_ASSERT(InNewSpace(clone));
     // Since we know the clone is allocated in new space, we can copy
     // the contents without worrying about updating the write barrier.
     CopyBlock(HeapObject::cast(clone)->address(),
               source->address(),
               object_size);
   }

   if (adjusted_object_size > object_size) {
     AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
         reinterpret_cast<Address>(clone) + object_size);
     alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+    ASSERT(site->map() == allocation_site_map());
     alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
   }

   SLOW_ASSERT(
       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
   FixedArray* properties = FixedArray::cast(source->properties());
   // Update elements if necessary.
   if (elements->length() > 0) {
     Object* elem;
(...skipping 3013 matching lines...)
@@ -8106,10 +8109,10 @@
   if (FLAG_concurrent_recompilation) {
     heap_->relocation_mutex_->Lock();
 #ifdef DEBUG
     heap_->relocation_mutex_locked_by_optimizer_thread_ =
         heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
 #endif  // DEBUG
   }
 }

 } }  // namespace v8::internal
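The last hunk takes the relocation mutex only when concurrent recompilation is enabled and, in debug builds, records whether it was the optimizer thread that acquired it. A minimal sketch of the same conditional-lock-plus-debug-bookkeeping shape, with hypothetical names and std::mutex standing in for V8's mutex type:

#include <mutex>

// Hypothetical placeholder; V8 asks its OptimizingCompilerThread instead.
struct OptimizerThread {
  static bool IsCurrentThread() { return false; }
};

class RelocationGuard {
 public:
  RelocationGuard(std::mutex* mutex, bool concurrent_recompilation)
      // Lock only when concurrent recompilation can relocate objects under us.
      : mutex_(concurrent_recompilation ? mutex : nullptr) {
    if (mutex_ != nullptr) {
      mutex_->lock();
#ifdef DEBUG
      // Record who took the lock so debug assertions elsewhere can tell
      // whether relocation is currently blocked by the optimizer thread.
      locked_by_optimizer_thread_ = OptimizerThread::IsCurrentThread();
#endif
    }
  }
  ~RelocationGuard() {
    if (mutex_ != nullptr) mutex_->unlock();
  }

 private:
  std::mutex* mutex_;
#ifdef DEBUG
  bool locked_by_optimizer_thread_ = false;
#endif
};

int main() {
  std::mutex relocation_mutex;
  // With the flag false the guard is a no-op, mirroring the hunk's condition.
  RelocationGuard guard(&relocation_mutex, /*concurrent_recompilation=*/true);
  return 0;
}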