OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/accessors.h" | 7 #include "src/accessors.h" |
8 #include "src/api.h" | 8 #include "src/api.h" |
9 #include "src/base/once.h" | 9 #include "src/base/once.h" |
10 #include "src/bootstrapper.h" | 10 #include "src/bootstrapper.h" |
(...skipping 1952 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1963 heap->OnMoveEvent(target, source, size); | 1963 heap->OnMoveEvent(target, source, size); |
1964 } | 1964 } |
1965 | 1965 |
1966 if (marks_handling == TRANSFER_MARKS) { | 1966 if (marks_handling == TRANSFER_MARKS) { |
1967 if (Marking::TransferColor(source, target)) { | 1967 if (Marking::TransferColor(source, target)) { |
1968 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size); | 1968 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size); |
1969 } | 1969 } |
1970 } | 1970 } |
1971 } | 1971 } |
1972 | 1972 |
| 1973 template<int alignment> |
| 1974 static inline bool SemiSpaceCopyObject(Map* map, |
| 1975 HeapObject** slot, |
| 1976 HeapObject* object, |
| 1977 int object_size) { |
| 1978 Heap* heap = map->GetHeap(); |
| 1979 |
| 1980 int allocation_size = object_size; |
| 1981 if (alignment != kObjectAlignment) { |
| 1982 ASSERT(alignment == kDoubleAlignment); |
| 1983 allocation_size += kPointerSize; |
| 1984 } |
| 1985 |
| 1986 ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE)); |
| 1987 AllocationResult allocation = |
| 1988 heap->new_space()->AllocateRaw(allocation_size); |
| 1989 |
| 1990 HeapObject* target = NULL; // Initialization to please compiler. |
| 1991 if (allocation.To(&target)) { |
| 1992 if (alignment != kObjectAlignment) { |
| 1993 target = EnsureDoubleAligned(heap, target, allocation_size); |
| 1994 } |
| 1995 |
| 1996 // Order is important: slot might be inside of the target if target |
| 1997 // was allocated over a dead object and slot comes from the store |
| 1998 // buffer. |
| 1999 *slot = target; |
| 2000 MigrateObject(heap, object, target, object_size); |
| 2001 |
| 2002 heap->promotion_queue()->SetNewLimit(heap->new_space()->top()); |
| 2003 heap->IncrementSemiSpaceCopiedObjectSize(object_size); |
| 2004 return true; |
| 2005 } |
| 2006 return false; |
| 2007 } |
| 2008 |
1973 | 2009 |
1974 template<ObjectContents object_contents, int alignment> | 2010 template<ObjectContents object_contents, int alignment> |
1975 static inline void EvacuateObject(Map* map, | 2011 static inline bool PromoteObject(Map* map, |
1976 HeapObject** slot, | 2012 HeapObject** slot, |
1977 HeapObject* object, | 2013 HeapObject* object, |
1978 int object_size) { | 2014 int object_size) { |
1979 SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); | 2015 Heap* heap = map->GetHeap(); |
1980 SLOW_ASSERT(object->Size() == object_size); | |
1981 | 2016 |
1982 int allocation_size = object_size; | 2017 int allocation_size = object_size; |
1983 if (alignment != kObjectAlignment) { | 2018 if (alignment != kObjectAlignment) { |
1984 ASSERT(alignment == kDoubleAlignment); | 2019 ASSERT(alignment == kDoubleAlignment); |
1985 allocation_size += kPointerSize; | 2020 allocation_size += kPointerSize; |
1986 } | 2021 } |
1987 | 2022 |
1988 Heap* heap = map->GetHeap(); | 2023 AllocationResult allocation; |
1989 if (heap->ShouldBePromoted(object->address(), object_size)) { | 2024 if (object_contents == DATA_OBJECT) { |
1990 AllocationResult allocation; | 2025 ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); |
| 2026 allocation = heap->old_data_space()->AllocateRaw(allocation_size); |
| 2027 } else { |
| 2028 ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE)); |
| 2029 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size); |
| 2030 } |
1991 | 2031 |
1992 if (object_contents == DATA_OBJECT) { | 2032 HeapObject* target = NULL; // Initialization to please compiler. |
1993 ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); | 2033 if (allocation.To(&target)) { |
1994 allocation = heap->old_data_space()->AllocateRaw(allocation_size); | 2034 if (alignment != kObjectAlignment) { |
1995 } else { | 2035 target = EnsureDoubleAligned(heap, target, allocation_size); |
1996 ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE)); | |
1997 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size); | |
1998 } | 2036 } |
1999 | 2037 |
2000 HeapObject* target = NULL; // Initialization to please compiler. | 2038 // Order is important: slot might be inside of the target if target |
2001 if (allocation.To(&target)) { | 2039 // was allocated over a dead object and slot comes from the store |
2002 if (alignment != kObjectAlignment) { | 2040 // buffer. |
2003 target = EnsureDoubleAligned(heap, target, allocation_size); | 2041 *slot = target; |
| 2042 MigrateObject(heap, object, target, object_size); |
| 2043 |
| 2044 if (object_contents == POINTER_OBJECT) { |
| 2045 if (map->instance_type() == JS_FUNCTION_TYPE) { |
| 2046 heap->promotion_queue()->insert( |
| 2047 target, JSFunction::kNonWeakFieldsEndOffset); |
| 2048 } else { |
| 2049 heap->promotion_queue()->insert(target, object_size); |
2004 } | 2050 } |
| 2051 } |
| 2052 heap->IncrementPromotedObjectsSize(object_size); |
| 2053 return true; |
| 2054 } |
| 2055 return false; |
| 2056 } |
2005 | 2057 |
2006 // Order is important: slot might be inside of the target if target | |
2007 // was allocated over a dead object and slot comes from the store | |
2008 // buffer. | |
2009 *slot = target; | |
2010 MigrateObject(heap, object, target, object_size); | |
2011 | 2058 |
2012 if (object_contents == POINTER_OBJECT) { | 2059 template<ObjectContents object_contents, int alignment> |
2013 if (map->instance_type() == JS_FUNCTION_TYPE) { | 2060 static inline void EvacuateObject(Map* map, |
2014 heap->promotion_queue()->insert( | 2061 HeapObject** slot, |
2015 target, JSFunction::kNonWeakFieldsEndOffset); | 2062 HeapObject* object, |
2016 } else { | 2063 int object_size) { |
2017 heap->promotion_queue()->insert(target, object_size); | 2064 SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); |
2018 } | 2065 SLOW_ASSERT(object->Size() == object_size); |
2019 } | 2066 Heap* heap = map->GetHeap(); |
2020 | 2067 |
2021 heap->IncrementPromotedObjectsSize(object_size); | 2068 if (!heap->ShouldBePromoted(object->address(), object_size)) { |
| 2069 // A semi-space copy may fail due to fragmentation. In that case, we |
| 2070 // try to promote the object. |
| 2071 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) { |
2022 return; | 2072 return; |
2023 } | 2073 } |
2024 } | 2074 } |
2025 ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE)); | |
2026 AllocationResult allocation = | |
2027 heap->new_space()->AllocateRaw(allocation_size); | |
2028 heap->promotion_queue()->SetNewLimit(heap->new_space()->top()); | |
2029 | 2075 |
2030 // Allocation in the other semi-space may fail due to fragmentation. | 2076 if (PromoteObject<object_contents, alignment>( |
2031 // In that case we allocate in the old generation. | 2077 map, slot, object, object_size)) { |
2032 if (allocation.IsRetry()) { | 2078 return; |
2033 if (object_contents == DATA_OBJECT) { | |
2034 ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); | |
2035 allocation = heap->old_data_space()->AllocateRaw(allocation_size); | |
2036 } else { | |
2037 ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE)); | |
2038 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size); | |
2039 } | |
2040 } | 2079 } |
2041 | 2080 |
2042 HeapObject* target = HeapObject::cast(allocation.ToObjectChecked()); | 2081 // If promotion failed, we try to copy the object to the other semi-space. |
| 2082 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return; |
2043 | 2083 |
2044 if (alignment != kObjectAlignment) { | 2084 UNREACHABLE(); |
2045 target = EnsureDoubleAligned(heap, target, allocation_size); | |
2046 } | |
2047 | |
2048 // Order is important: slot might be inside of the target if target | |
2049 // was allocated over a dead object and slot comes from the store | |
2050 // buffer. | |
2051 *slot = target; | |
2052 MigrateObject(heap, object, target, object_size); | |
2053 heap->IncrementSemiSpaceCopiedObjectSize(object_size); | |
2054 return; | |
2055 } | 2085 } |
2056 | 2086 |
2057 | 2087 |
2058 static inline void EvacuateJSFunction(Map* map, | 2088 static inline void EvacuateJSFunction(Map* map, |
2059 HeapObject** slot, | 2089 HeapObject** slot, |
2060 HeapObject* object) { | 2090 HeapObject* object) { |
2061 ObjectEvacuationStrategy<POINTER_OBJECT>:: | 2091 ObjectEvacuationStrategy<POINTER_OBJECT>:: |
2062 template VisitSpecialized<JSFunction::kSize>(map, slot, object); | 2092 template VisitSpecialized<JSFunction::kSize>(map, slot, object); |
2063 | 2093 |
2064 HeapObject* target = *slot; | 2094 HeapObject* target = *slot; |
(...skipping 4305 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
6370 static_cast<int>(object_sizes_last_time_[index])); | 6400 static_cast<int>(object_sizes_last_time_[index])); |
6371 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) | 6401 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) |
6372 #undef ADJUST_LAST_TIME_OBJECT_COUNT | 6402 #undef ADJUST_LAST_TIME_OBJECT_COUNT |
6373 | 6403 |
6374 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); | 6404 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); |
6375 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); | 6405 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); |
6376 ClearObjectStats(); | 6406 ClearObjectStats(); |
6377 } | 6407 } |
6378 | 6408 |
6379 } } // namespace v8::internal | 6409 } } // namespace v8::internal |
OLD | NEW |