Chromium Code Reviews

Index: src/heap-inl.h
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 64125bc302c7d275133974a54f0566eb0126c916..df8b211fb6df5ba7d999725d69ec8c3ae54c1869 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -7,6 +7,7 @@
 #include <cmath>
+#include "src/cpu-profiler.h"
 #include "src/heap.h"
 #include "src/heap-profiler.h"
 #include "src/isolate.h"
@@ -184,7 +185,6 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes,
   ASSERT(AllowHandleAllocation::IsAllowed());
   ASSERT(AllowHeapAllocation::IsAllowed());
   ASSERT(gc_state_ == NOT_IN_GC);
-  HeapProfiler* profiler = isolate_->heap_profiler();
 #ifdef DEBUG
   if (FLAG_gc_interval >= 0 &&
       AllowAllocationFailure::IsAllowed(isolate_) &&
@@ -204,8 +204,8 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes,
         retry_space != NEW_SPACE) {
       space = retry_space;
     } else {
-      if (profiler->is_tracking_allocations() && allocation.To(&object)) {
-        profiler->AllocationEvent(object->address(), size_in_bytes);
+      if (allocation.To(&object)) {
+        OnAllocationEvent(object, size_in_bytes);
       }
       return allocation;
     }
@@ -216,7 +216,12 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes,
   } else if (OLD_DATA_SPACE == space) {
     allocation = old_data_space_->AllocateRaw(size_in_bytes);
   } else if (CODE_SPACE == space) {
-    allocation = code_space_->AllocateRaw(size_in_bytes);
+    if (size_in_bytes <= code_space()->AreaSize()) {
+      allocation = code_space_->AllocateRaw(size_in_bytes);
+    } else {
+      // Large code objects are allocated in large object space.
+      allocation = lo_space_->AllocateRaw(size_in_bytes, EXECUTABLE);
+    }
   } else if (LO_SPACE == space) {
     allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
   } else if (CELL_SPACE == space) {
@@ -227,14 +232,99 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes,
     ASSERT(MAP_SPACE == space);
     allocation = map_space_->AllocateRaw(size_in_bytes);
   }
-  if (allocation.IsRetry()) old_gen_exhausted_ = true;
-  if (profiler->is_tracking_allocations() && allocation.To(&object)) {
-    profiler->AllocationEvent(object->address(), size_in_bytes);
+  if (allocation.To(&object)) {
+    OnAllocationEvent(object, size_in_bytes);
+  } else {
+    old_gen_exhausted_ = true;
   }
   return allocation;
 }
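Note: the success/retry handling above relies on AllocationResult's To()/IsRetry() pair: To() hands out the object only when the allocation succeeded, otherwise the result is a retry and the old generation is marked exhausted. A minimal standalone analog of that pattern, illustrative only and not V8's actual AllocationResult class:

    #include <cstdio>

    // Illustrative stand-in for a result that is either an object or a retry.
    struct Result {
      void* object;  // non-null on success
      bool retry;    // true when the target space could not satisfy the request

      static Result Of(void* obj) { return Result{obj, false}; }
      static Result Retry() { return Result{nullptr, true}; }

      // Mirrors the To() idiom used above: yields the object only on success.
      bool To(void** out) const {
        if (retry) return false;
        *out = object;
        return true;
      }
      bool IsRetry() const { return retry; }
    };

    int main() {
      char storage[16];
      Result ok = Result::Of(storage);
      Result failed = Result::Retry();

      void* obj = nullptr;
      if (ok.To(&obj)) std::printf("success: run the allocation event hook\n");
      if (failed.IsRetry()) std::printf("retry: mark the old generation exhausted\n");
      return 0;
    }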
+void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
+  HeapProfiler* profiler = isolate_->heap_profiler();
+  if (profiler->is_tracking_allocations()) {
+    profiler->AllocationEvent(object->address(), size_in_bytes);
+  }
+
+  if (FLAG_verify_predictable) {
+    ++allocations_count_;
+
+    UpdateAllocationsHash(object);
+    UpdateAllocationsHash(size_in_bytes);
+
+    if ((FLAG_dump_allocations_digest_at_alloc > 0) &&
+        (--dump_allocations_hash_countdown_ == 0)) {
+      dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc;
+      PrintAlloctionsHash();
+    }
+  }
+}
+
+
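Note: OnAllocationEvent above only prints the digest every FLAG_dump_allocations_digest_at_alloc events, re-arming a countdown each time it reaches zero. A minimal sketch of that countdown pattern; the names and the dump action below are placeholders, not V8 code:

    #include <cstdio>

    const int kDumpEveryN = 3;    // stands in for the flag value
    int countdown = kDumpEveryN;

    void OnEvent(int event_index) {
      if (kDumpEveryN > 0 && --countdown == 0) {
        countdown = kDumpEveryN;  // re-arm for the next N events
        std::printf("dump digest after event %d\n", event_index);
      }
    }

    int main() {
      for (int i = 1; i <= 10; ++i) OnEvent(i);  // dumps after events 3, 6 and 9
      return 0;
    }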
+void Heap::OnMoveEvent(HeapObject* target,
+                       HeapObject* source,
+                       int size_in_bytes) {
+  HeapProfiler* heap_profiler = isolate_->heap_profiler();
+  if (heap_profiler->is_tracking_object_moves()) {
+    heap_profiler->ObjectMoveEvent(source->address(), target->address(),
+                                   size_in_bytes);
+  }
+
+  if (isolate_->logger()->is_logging_code_events() ||
+      isolate_->cpu_profiler()->is_profiling()) {
+    if (target->IsSharedFunctionInfo()) {
+      PROFILE(isolate_, SharedFunctionInfoMoveEvent(
+          source->address(), target->address()));
+    }
+  }
+
+  if (FLAG_verify_predictable) {
+    ++allocations_count_;
+
+    UpdateAllocationsHash(source);
+    UpdateAllocationsHash(target);
+    UpdateAllocationsHash(size_in_bytes);
+
+    if ((FLAG_dump_allocations_digest_at_alloc > 0) &&
+        (--dump_allocations_hash_countdown_ == 0)) {
+      dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc;
+      PrintAlloctionsHash();
+    }
+  }
+}
+
+
+void Heap::UpdateAllocationsHash(HeapObject* object) {
+  Address object_address = object->address();
+  MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
+  AllocationSpace allocation_space = memory_chunk->owner()->identity();
+
+  STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
+  uint32_t value =
+      static_cast<uint32_t>(object_address - memory_chunk->address()) |
+      (static_cast<uint32_t>(allocation_space) << kPageSizeBits);
    Hannes Payer (out of office), 2014/06/18 09:45:10:
        Why do you need the or (static_cast<uint32_t>(allocation_space) << kPageSizeBits) ...

    Igor Sheludko, 2014/06/18 12:38:49:
        I did that in order to involve both object offset ...
+
+  UpdateAllocationsHash(value);
+}
+
+
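Note: the exchange above concerns the packed value: the low kPageSizeBits bits hold the object's offset within its page, and the allocation-space tag is shifted into the bits above, so two objects at the same page offset in different spaces feed different values into the hash. A standalone illustration of that packing; the real kPageSizeBits and kSpaceTagSize are defined elsewhere in V8, and the values below are assumptions for the example only:

    #include <cstdint>
    #include <cstdio>

    // Assumed, illustrative constants -- not V8's definitions.
    constexpr uint32_t kPageSizeBits = 20;  // pretend pages are 1 MB
    constexpr uint32_t kSpaceTagSize = 4;   // pretend there are at most 16 spaces
    static_assert(kSpaceTagSize + kPageSizeBits <= 32, "tag and offset must fit");

    uint32_t PackOffsetAndSpace(uint32_t offset_in_page, uint32_t space_id) {
      // Offset in the low bits, space tag in the bits above it.
      return offset_in_page | (space_id << kPageSizeBits);
    }

    int main() {
      std::printf("0x%08x\n", PackOffsetAndSpace(0x123u, 2u));  // prints 0x00200123
      return 0;
    }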
+void Heap::UpdateAllocationsHash(uint32_t value) {
+  uint16_t c1 = static_cast<uint16_t>(value);
+  uint16_t c2 = static_cast<uint16_t>(value >> 16);
+  raw_allocations_hash_ =
+      StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
+  raw_allocations_hash_ =
+      StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
+}
+
+
+void Heap::PrintAlloctionsHash() {
+  uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
+  PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count_, hash);
+}
+
+
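Note: taken together, the helpers above maintain a running digest of every allocation and move, which --verify-predictable compares across runs. A self-contained sketch of the same shape, using a Jenkins-style one-at-a-time mix where the real code calls StringHasher::AddCharacterCore and StringHasher::GetHashCore; the mixing and finalizing steps below are assumptions for illustration, not necessarily V8's exact functions:

    #include <cstdint>
    #include <cstdio>

    static uint32_t raw_hash = 0;
    static uint32_t allocations = 0;

    // Mix one 16-bit "character" into the running hash (one-at-a-time style).
    void AddHalf(uint16_t c) {
      raw_hash += c;
      raw_hash += raw_hash << 10;
      raw_hash ^= raw_hash >> 6;
    }

    // Split a 32-bit value into two halves, as UpdateAllocationsHash does above.
    void UpdateHash(uint32_t value) {
      AddHalf(static_cast<uint16_t>(value));        // low 16 bits
      AddHalf(static_cast<uint16_t>(value >> 16));  // high 16 bits
    }

    // Finalize the accumulated state into the reported hash.
    uint32_t Finalize(uint32_t h) {
      h += h << 3;
      h ^= h >> 11;
      h += h << 15;
      return h;
    }

    int main() {
      // Pretend two allocations were observed: (packed location, size) pairs.
      const uint32_t events[][2] = {{0x00200123u, 32u}, {0x00100040u, 64u}};
      for (const auto& e : events) {
        ++allocations;
        UpdateHash(e[0]);  // where the object landed
        UpdateHash(e[1]);  // how big it was
      }
      std::printf("\n### Allocations = %u, hash = 0x%08x\n",
                  allocations, Finalize(raw_hash));
      return 0;
    }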
 void Heap::FinalizeExternalString(String* string) {
   ASSERT(string->IsExternalString());
   v8::String::ExternalStringResourceBase** resource_addr =