Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/accessors.h" | 7 #include "src/accessors.h" |
| 8 #include "src/api.h" | 8 #include "src/api.h" |
| 9 #include "src/base/once.h" | 9 #include "src/base/once.h" |
| 10 #include "src/bootstrapper.h" | 10 #include "src/bootstrapper.h" |
| (...skipping 1952 matching lines...) | |
| 1963 heap->OnMoveEvent(target, source, size); | 1963 heap->OnMoveEvent(target, source, size); |
| 1964 } | 1964 } |
| 1965 | 1965 |
| 1966 if (marks_handling == TRANSFER_MARKS) { | 1966 if (marks_handling == TRANSFER_MARKS) { |
| 1967 if (Marking::TransferColor(source, target)) { | 1967 if (Marking::TransferColor(source, target)) { |
| 1968 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size); | 1968 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size); |
| 1969 } | 1969 } |
| 1970 } | 1970 } |
| 1971 } | 1971 } |
| 1972 | 1972 |
| 1973 template<int alignment> | |
| 1974 static inline bool SemiSpaceCopyObject(Map* map, | |
| 1975 HeapObject** slot, | |
| 1976 HeapObject* object, | |
| 1977 int object_size) { | |
| 1978 Heap* heap = map->GetHeap(); | |
| 1979 | |
| 1980 int allocation_size = object_size; | |
| 1981 if (alignment != kObjectAlignment) { | |
| 1982 ASSERT(alignment == kDoubleAlignment); | |
| 1983 allocation_size += kPointerSize; | |
| 1984 } | |
| 1985 | |
| 1986 ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE)); | |
| 1987 AllocationResult allocation = | |
| 1988 heap->new_space()->AllocateRaw(allocation_size); | |
| 1989 | |
| 1990 // Allocation in the other semi-space may fail due to fragmentation. | |
| 1991 // In that case we allocate in the old generation. | |

Igor Sheludko 2014/06/25 09:06:37
Probably this comment should not be here since you
Hannes Payer (out of office) 2014/06/25 09:25:44
Done.

| 1992 HeapObject* target = NULL; // Initialization to please compiler. | |
| 1993 if (allocation.To(&target)) { | |
| 1994 if (alignment != kObjectAlignment) { | |
| 1995 target = EnsureDoubleAligned(heap, target, allocation_size); | |
| 1996 } | |
| 1997 | |
| 1998 // Order is important: slot might be inside of the target if target | |
| 1999 // was allocated over a dead object and slot comes from the store | |
| 2000 // buffer. | |
| 2001 *slot = target; | |
| 2002 MigrateObject(heap, object, target, object_size); | |
| 2003 | |
| 2004 heap->promotion_queue()->SetNewLimit(heap->new_space()->top()); | |
| 2005 heap->IncrementSemiSpaceCopiedObjectSize(object_size); | |
| 2006 return true; | |
| 2007 } | |
| 2008 return false; | |
| 2009 } | |
| 2010 | |
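A note on the `allocation_size += kPointerSize` over-allocation visible in the new function above: reserving one extra pointer slot is what lets `EnsureDoubleAligned` shift the object forward when the raw allocation start is not 8-byte aligned. Below is a minimal standalone sketch of that arithmetic; the constants are assumptions for a 32-bit layout (in V8 they come from the build configuration), and `AlignForDoubles` is a hypothetical stand-in, not the real helper.

```cpp
#include <cassert>
#include <cstdint>

// Assumed 32-bit layout: a tagged pointer is 4 bytes, doubles want 8-byte
// alignment. These stand in for V8's kPointerSize / kDoubleAlignment.
constexpr std::uintptr_t kPointerSize = 4;
constexpr std::uintptr_t kDoubleAlignment = 8;

// Hypothetical stand-in for EnsureDoubleAligned: because the caller reserved
// object_size + kPointerSize bytes, an 8-byte-aligned start fits inside the
// reservation whichever parity the raw start has; in the real heap the
// skipped word becomes a one-word filler object.
std::uintptr_t AlignForDoubles(std::uintptr_t raw_start) {
  return (raw_start % kDoubleAlignment == 0) ? raw_start
                                             : raw_start + kPointerSize;
}

int main() {
  assert(AlignForDoubles(0x1000) % kDoubleAlignment == 0);  // already aligned
  assert(AlignForDoubles(0x1004) % kDoubleAlignment == 0);  // shifted by one word
  return 0;
}
```
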
| 1973 | 2011 |
| 1974 template<ObjectContents object_contents, int alignment> | 2012 template<ObjectContents object_contents, int alignment> |
| 1975 static inline void EvacuateObject(Map* map, | 2013 static inline bool PromoteObject(Map* map, |
| 1976 HeapObject** slot, | 2014 HeapObject** slot, |
| 1977 HeapObject* object, | 2015 HeapObject* object, |
| 1978 int object_size) { | 2016 int object_size) { |
| 1979 SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); | 2017 Heap* heap = map->GetHeap(); |
| 1980 SLOW_ASSERT(object->Size() == object_size); | |
| 1981 | 2018 |
| 1982 int allocation_size = object_size; | 2019 int allocation_size = object_size; |
| 1983 if (alignment != kObjectAlignment) { | 2020 if (alignment != kObjectAlignment) { |
| 1984 ASSERT(alignment == kDoubleAlignment); | 2021 ASSERT(alignment == kDoubleAlignment); |
| 1985 allocation_size += kPointerSize; | 2022 allocation_size += kPointerSize; |
| 1986 } | 2023 } |
| 1987 | 2024 |
| 1988 Heap* heap = map->GetHeap(); | 2025 AllocationResult allocation; |
| 1989 if (heap->ShouldBePromoted(object->address(), object_size)) { | 2026 if (object_contents == DATA_OBJECT) { |
| 1990 AllocationResult allocation; | 2027 ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); |
| 2028 allocation = heap->old_data_space()->AllocateRaw(allocation_size); | |
| 2029 } else { | |
| 2030 ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE)); | |
| 2031 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size); | |
| 2032 } | |
| 1991 | 2033 |
| 1992 if (object_contents == DATA_OBJECT) { | 2034 HeapObject* target = NULL; // Initialization to please compiler. |
| 1993 ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); | 2035 if (allocation.To(&target)) { |
| 1994 allocation = heap->old_data_space()->AllocateRaw(allocation_size); | 2036 if (alignment != kObjectAlignment) { |
| 1995 } else { | 2037 target = EnsureDoubleAligned(heap, target, allocation_size); |
| 1996 ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE)); | |
| 1997 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size); | |
| 1998 } | 2038 } |
| 1999 | 2039 |
| 2000 HeapObject* target = NULL; // Initialization to please compiler. | 2040 // Order is important: slot might be inside of the target if target |
| 2001 if (allocation.To(&target)) { | 2041 // was allocated over a dead object and slot comes from the store |
| 2002 if (alignment != kObjectAlignment) { | 2042 // buffer. |
| 2003 target = EnsureDoubleAligned(heap, target, allocation_size); | 2043 *slot = target; |
| 2044 MigrateObject(heap, object, target, object_size); | |
| 2045 | |
| 2046 if (object_contents == POINTER_OBJECT) { | |
| 2047 if (map->instance_type() == JS_FUNCTION_TYPE) { | |
| 2048 heap->promotion_queue()->insert( | |
| 2049 target, JSFunction::kNonWeakFieldsEndOffset); | |
| 2050 } else { | |
| 2051 heap->promotion_queue()->insert(target, object_size); | |
| 2004 } | 2052 } |
| 2053 } | |
| 2054 heap->IncrementPromotedObjectsSize(object_size); | |
| 2055 return true; | |
| 2056 } | |
| 2057 return false; | |
| 2058 } | |
| 2005 | 2059 |
| 2006 // Order is important: slot might be inside of the target if target | |
| 2007 // was allocated over a dead object and slot comes from the store | |
| 2008 // buffer. | |
| 2009 *slot = target; | |
| 2010 MigrateObject(heap, object, target, object_size); | |
| 2011 | 2060 |
| 2012 if (object_contents == POINTER_OBJECT) { | 2061 template<ObjectContents object_contents, int alignment> |
| 2013 if (map->instance_type() == JS_FUNCTION_TYPE) { | 2062 static inline void EvacuateObject(Map* map, |
| 2014 heap->promotion_queue()->insert( | 2063 HeapObject** slot, |
| 2015 target, JSFunction::kNonWeakFieldsEndOffset); | 2064 HeapObject* object, |
| 2016 } else { | 2065 int object_size) { |
| 2017 heap->promotion_queue()->insert(target, object_size); | 2066 SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); |
| 2018 } | 2067 SLOW_ASSERT(object->Size() == object_size); |
| 2019 } | 2068 Heap* heap = map->GetHeap(); |
| 2020 | 2069 |
| 2021 heap->IncrementPromotedObjectsSize(object_size); | 2070 if (!heap->ShouldBePromoted(object->address(), object_size)) { |
| 2071 // A semi-space copy may fail due to fragmentation. In that case, we | |
| 2072 // try to promote the object. | |
| 2073 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) { | |
| 2022 return; | 2074 return; |
| 2023 } | 2075 } |
| 2024 } | 2076 } |
| 2025 ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE)); | |
| 2026 AllocationResult allocation = | |
| 2027 heap->new_space()->AllocateRaw(allocation_size); | |
| 2028 heap->promotion_queue()->SetNewLimit(heap->new_space()->top()); | |
| 2029 | 2077 |
| 2030 // Allocation in the other semi-space may fail due to fragmentation. | 2078 if (PromoteObject<object_contents, alignment>( |
| 2031 // In that case we allocate in the old generation. | 2079 map, slot, object, object_size)) { |
| 2032 if (allocation.IsRetry()) { | 2080 return; |
| 2033 if (object_contents == DATA_OBJECT) { | |
| 2034 ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); | |
| 2035 allocation = heap->old_data_space()->AllocateRaw(allocation_size); | |
| 2036 } else { | |
| 2037 ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE)); | |
| 2038 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size); | |
| 2039 } | |
| 2040 } | 2081 } |
| 2041 | 2082 |
| 2042 HeapObject* target = HeapObject::cast(allocation.ToObjectChecked()); | 2083 // If promotion failed, we try to copy the object to the other semi-space |
| 2084 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return; | |
| 2043 | 2085 |
| 2044 if (alignment != kObjectAlignment) { | 2086 UNREACHABLE(); |
| 2045 target = EnsureDoubleAligned(heap, target, allocation_size); | |
| 2046 } | |
| 2047 | |
| 2048 // Order is important: slot might be inside of the target if target | |
| 2049 // was allocated over a dead object and slot comes from the store | |
| 2050 // buffer. | |
| 2051 *slot = target; | |
| 2052 MigrateObject(heap, object, target, object_size); | |
| 2053 heap->IncrementSemiSpaceCopiedObjectSize(object_size); | |
| 2054 return; | |
| 2055 } | 2087 } |
| 2056 | 2088 |
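Read without the diff markup, the evacuation order introduced on the NEW side above amounts to the following. This is a condensed sketch of that control flow, not a verbatim copy of the file:

```cpp
// Condensed sketch: young objects are first copied within new space, objects
// past the promotion threshold are first promoted, and each path falls back
// to the other before giving up with UNREACHABLE().
template<ObjectContents object_contents, int alignment>
static inline void EvacuateObject(Map* map, HeapObject** slot,
                                  HeapObject* object, int object_size) {
  Heap* heap = map->GetHeap();

  if (!heap->ShouldBePromoted(object->address(), object_size)) {
    // A semi-space copy may fail due to fragmentation; try promotion then.
    if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
  }

  if (PromoteObject<object_contents, alignment>(map, slot, object,
                                                object_size)) {
    return;
  }

  // Promotion failed as well: retry the copy into the other semi-space.
  if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;

  UNREACHABLE();
}
```

`SemiSpaceCopyObject` and `PromoteObject` are the two helpers factored out earlier in this patch; both return false when their raw allocation fails, which is what makes the fallback chain possible.
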
| 2057 | 2089 |
| 2058 static inline void EvacuateJSFunction(Map* map, | 2090 static inline void EvacuateJSFunction(Map* map, |
| 2059 HeapObject** slot, | 2091 HeapObject** slot, |
| 2060 HeapObject* object) { | 2092 HeapObject* object) { |
| 2061 ObjectEvacuationStrategy<POINTER_OBJECT>:: | 2093 ObjectEvacuationStrategy<POINTER_OBJECT>:: |
| 2062 template VisitSpecialized<JSFunction::kSize>(map, slot, object); | 2094 template VisitSpecialized<JSFunction::kSize>(map, slot, object); |
| 2063 | 2095 |
| 2064 HeapObject* target = *slot; | 2096 HeapObject* target = *slot; |
| (...skipping 4305 matching lines...) | |
| 6370 static_cast<int>(object_sizes_last_time_[index])); | 6402 static_cast<int>(object_sizes_last_time_[index])); |
| 6371 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) | 6403 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) |
| 6372 #undef ADJUST_LAST_TIME_OBJECT_COUNT | 6404 #undef ADJUST_LAST_TIME_OBJECT_COUNT |
| 6373 | 6405 |
| 6374 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); | 6406 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); |
| 6375 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); | 6407 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); |
| 6376 ClearObjectStats(); | 6408 ClearObjectStats(); |
| 6377 } | 6409 } |
| 6378 | 6410 |
| 6379 } } // namespace v8::internal | 6411 } } // namespace v8::internal |