Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(35)

Side by Side Diff: src/heap.cc

Issue 351893003: Update survival statistics correctly in the Scavenger. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/v8.h" 5 #include "src/v8.h"
6 6
7 #include "src/accessors.h" 7 #include "src/accessors.h"
8 #include "src/api.h" 8 #include "src/api.h"
9 #include "src/base/once.h" 9 #include "src/base/once.h"
10 #include "src/bootstrapper.h" 10 #include "src/bootstrapper.h"
(...skipping 1967 matching lines...) Expand 10 before | Expand all | Expand 10 after
1978 int object_size) { 1978 int object_size) {
1979 SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); 1979 SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
1980 SLOW_ASSERT(object->Size() == object_size); 1980 SLOW_ASSERT(object->Size() == object_size);
1981 1981
1982 int allocation_size = object_size; 1982 int allocation_size = object_size;
1983 if (alignment != kObjectAlignment) { 1983 if (alignment != kObjectAlignment) {
1984 ASSERT(alignment == kDoubleAlignment); 1984 ASSERT(alignment == kDoubleAlignment);
1985 allocation_size += kPointerSize; 1985 allocation_size += kPointerSize;
1986 } 1986 }
1987 1987
1988 AllocationResult allocation;
Igor Sheludko 2014/06/24 16:45:51 Move this closer to the usages.
Hannes Payer (out of office) 2014/06/24 19:23:03 Done. I removed the local version and use just one
1988 Heap* heap = map->GetHeap(); 1989 Heap* heap = map->GetHeap();
1989 if (heap->ShouldBePromoted(object->address(), object_size)) {
1990 AllocationResult allocation;
1991 1990
1992 if (object_contents == DATA_OBJECT) { 1991 if (!heap->ShouldBePromoted(object->address(), object_size)) {
1993 ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); 1992 ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
1994 allocation = heap->old_data_space()->AllocateRaw(allocation_size); 1993 AllocationResult allocation =
1995 } else { 1994 heap->new_space()->AllocateRaw(allocation_size);
1996 ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
1997 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
1998 }
1999 1995
2000 HeapObject* target = NULL; // Initialization to please compiler. 1996 // Allocation in the other semi-space may fail due to fragmentation.
2001 if (allocation.To(&target)) { 1997 // In that case we allocate in the old generation.
1998 if (!allocation.IsRetry()) {
Igor Sheludko 2014/06/24 16:45:51 To be uniform: HeapObject* target = NULL; if (
Hannes Payer (out of office) 2014/06/24 19:23:03 Done.
1999 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2000 HeapObject* target = HeapObject::cast(allocation.ToObjectChecked());
2001
2002 if (alignment != kObjectAlignment) { 2002 if (alignment != kObjectAlignment) {
2003 target = EnsureDoubleAligned(heap, target, allocation_size); 2003 target = EnsureDoubleAligned(heap, target, allocation_size);
2004 } 2004 }
2005 2005
2006 // Order is important: slot might be inside of the target if target 2006 // Order is important: slot might be inside of the target if target
2007 // was allocated over a dead object and slot comes from the store 2007 // was allocated over a dead object and slot comes from the store
2008 // buffer. 2008 // buffer.
2009 *slot = target; 2009 *slot = target;
2010 MigrateObject(heap, object, target, object_size); 2010 MigrateObject(heap, object, target, object_size);
2011 2011 heap->IncrementSemiSpaceCopiedObjectSize(object_size);
2012 if (object_contents == POINTER_OBJECT) {
2013 if (map->instance_type() == JS_FUNCTION_TYPE) {
2014 heap->promotion_queue()->insert(
2015 target, JSFunction::kNonWeakFieldsEndOffset);
2016 } else {
2017 heap->promotion_queue()->insert(target, object_size);
2018 }
2019 }
2020
2021 heap->IncrementPromotedObjectsSize(object_size);
2022 return; 2012 return;
2023 } 2013 }
2024 } 2014 }
2025 ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
2026 AllocationResult allocation =
2027 heap->new_space()->AllocateRaw(allocation_size);
2028 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2029 2015
2030 // Allocation in the other semi-space may fail due to fragmentation. 2016 if (object_contents == DATA_OBJECT) {
2031 // In that case we allocate in the old generation. 2017 ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2032 if (allocation.IsRetry()) { 2018 allocation = heap->old_data_space()->AllocateRaw(allocation_size);
2033 if (object_contents == DATA_OBJECT) { 2019 } else {
2034 ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); 2020 ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2035 allocation = heap->old_data_space()->AllocateRaw(allocation_size); 2021 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
2036 } else {
2037 ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2038 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
2039 }
2040 } 2022 }
2041 2023
2042 HeapObject* target = HeapObject::cast(allocation.ToObjectChecked()); 2024 HeapObject* target = NULL; // Initialization to please compiler.
2025 if (allocation.To(&target)) {
2026 if (alignment != kObjectAlignment) {
2027 target = EnsureDoubleAligned(heap, target, allocation_size);
2028 }
2043 2029
2044 if (alignment != kObjectAlignment) { 2030 // Order is important: slot might be inside of the target if target
2045 target = EnsureDoubleAligned(heap, target, allocation_size); 2031 // was allocated over a dead object and slot comes from the store
2032 // buffer.
2033 *slot = target;
2034 MigrateObject(heap, object, target, object_size);
2035
2036 if (object_contents == POINTER_OBJECT) {
2037 if (map->instance_type() == JS_FUNCTION_TYPE) {
2038 heap->promotion_queue()->insert(target,
2039 JSFunction::kNonWeakFieldsEndOffset);
2040 } else {
2041 heap->promotion_queue()->insert(target, object_size);
2042 }
2043 }
2044
2045 heap->IncrementPromotedObjectsSize(object_size);
2046 return;
2046 } 2047 }
2047 2048
2048 // Order is important: slot might be inside of the target if target 2049 // The scavenger should always have enough space available in the old
2049 // was allocated over a dead object and slot comes from the store 2050 // generation for promotion. Otherwise a full gc would have been triggered.
2050 // buffer. 2051 UNREACHABLE();
2051 *slot = target;
2052 MigrateObject(heap, object, target, object_size);
2053 heap->IncrementSemiSpaceCopiedObjectSize(object_size);
2054 return;
2055 } 2052 }
2056 2053
2057 2054
2058 static inline void EvacuateJSFunction(Map* map, 2055 static inline void EvacuateJSFunction(Map* map,
2059 HeapObject** slot, 2056 HeapObject** slot,
2060 HeapObject* object) { 2057 HeapObject* object) {
2061 ObjectEvacuationStrategy<POINTER_OBJECT>:: 2058 ObjectEvacuationStrategy<POINTER_OBJECT>::
2062 template VisitSpecialized<JSFunction::kSize>(map, slot, object); 2059 template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2063 2060
2064 HeapObject* target = *slot; 2061 HeapObject* target = *slot;
(...skipping 4305 matching lines...) Expand 10 before | Expand all | Expand 10 after
6370 static_cast<int>(object_sizes_last_time_[index])); 6367 static_cast<int>(object_sizes_last_time_[index]));
6371 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) 6368 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
6372 #undef ADJUST_LAST_TIME_OBJECT_COUNT 6369 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6373 6370
6374 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); 6371 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
6375 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); 6372 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
6376 ClearObjectStats(); 6373 ClearObjectStats();
6377 } 6374 }
6378 6375
6379 } } // namespace v8::internal 6376 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698