Chromium Code Reviews

Unified Diff: src/heap/heap.cc

Issue 1141523002: Implement unaligned allocation and allocate heap numbers double-unaligned in the runtime. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 7 months ago
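Context for the diff below: the patch replaces the double-alignment-only helper Heap::EnsureDoubleAligned with Heap::EnsureAligned, which takes an AllocationAlignment and pads with a one-word filler either to reach 8-byte alignment (kDoubleAligned) or to deliberately avoid it (kDoubleUnaligned); the unaligned flavor is requested when allocating HeapNumbers, whose 8-byte value field sits one pointer past the object start on 32-bit targets. A minimal stand-alone sketch of that padding decision, using hypothetical names and constants rather than the V8 declarations:

// Hypothetical stand-alone sketch (not the V8 API): the padding decision made
// when a 32-bit allocation must control where a double field lands.
#include <cassert>
#include <cstdint>

enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };

const uintptr_t kPointerSize = 4;          // assumed 32-bit host
const uintptr_t kDoubleAlignmentMask = 7;  // 8-byte alignment mask

// Returns the number of filler bytes to place *before* the object so the
// requested alignment holds; when no leading filler is needed, the same
// amount is instead trimmed from the tail, keeping the reserved size constant.
uintptr_t FillerBytesBefore(uintptr_t address, AllocationAlignment alignment) {
  bool misaligned = (address & kDoubleAlignmentMask) != 0;
  if (alignment == kDoubleAligned && misaligned) return kPointerSize;
  if (alignment == kDoubleUnaligned && !misaligned) return kPointerSize;
  return 0;
}

int main() {
  assert(FillerBytesBefore(0x1004, kDoubleAligned) == kPointerSize);
  assert(FillerBytesBefore(0x1000, kDoubleAligned) == 0);
  assert(FillerBytesBefore(0x1000, kDoubleUnaligned) == kPointerSize);
  assert(FillerBytesBefore(0x1004, kDoubleUnaligned) == 0);
  return 0;
}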
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/v8.h"

 #include "src/accessors.h"
 #include "src/api.h"
 #include "src/base/bits.h"
 #include "src/base/once.h"
(...skipping 1947 matching lines...)


 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
               0);  // NOLINT
 STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & kDoubleAlignmentMask) ==
               0);  // NOLINT
 STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
                kDoubleAlignmentMask) == 0);  // NOLINT
 STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
               0);  // NOLINT
+#ifdef V8_HOST_ARCH_32_BIT
+STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
+              0);  // NOLINT
+#endif


-HeapObject* Heap::EnsureDoubleAligned(HeapObject* object, int size) {
-  if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
+HeapObject* Heap::EnsureAligned(HeapObject* object, int size,
+                                AllocationAlignment alignment) {
+  if (alignment == kDoubleAligned &&
+      (OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
+    CreateFillerObjectAt(object->address(), kPointerSize);
+    return HeapObject::FromAddress(object->address() + kPointerSize);
+  } else if (alignment == kDoubleUnaligned &&
+             (OffsetFrom(object->address()) & kDoubleAlignmentMask) == 0) {
     CreateFillerObjectAt(object->address(), kPointerSize);
     return HeapObject::FromAddress(object->address() + kPointerSize);
   } else {
     CreateFillerObjectAt(object->address() + size - kPointerSize, kPointerSize);
     return object;
   }
 }


+HeapObject* Heap::PrecedeWithFiller(HeapObject* object) {
+  CreateFillerObjectAt(object->address(), kPointerSize);
+  return HeapObject::FromAddress(object->address() + kPointerSize);
+}
+
+
 HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
-  return EnsureDoubleAligned(object, size);
+  return EnsureAligned(object, size, kDoubleAligned);
 }


 enum LoggingAndProfiling {
   LOGGING_AND_PROFILING_ENABLED,
   LOGGING_AND_PROFILING_DISABLED
 };


 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
(...skipping 131 matching lines...)
   }
 }

 template <int alignment>
 static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
                                        HeapObject* object, int object_size) {
   Heap* heap = map->GetHeap();

   DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
   AllocationResult allocation;
-#ifndef V8_HOST_ARCH_64_BIT
+#ifdef V8_HOST_ARCH_32_BIT
   if (alignment == kDoubleAlignment) {
-    allocation = heap->new_space()->AllocateRawDoubleAligned(object_size);
+    allocation =
+        heap->new_space()->AllocateRawAligned(object_size, kDoubleAligned);
   } else {
     allocation = heap->new_space()->AllocateRaw(object_size);
   }
 #else
   allocation = heap->new_space()->AllocateRaw(object_size);
 #endif

   HeapObject* target = NULL;  // Initialization to please compiler.
   if (allocation.To(&target)) {
     // Order is important here: Set the promotion limit before storing a
(...skipping 13 matching lines...)
   return false;
 }


 template <ObjectContents object_contents, int alignment>
 static inline bool PromoteObject(Map* map, HeapObject** slot,
                                  HeapObject* object, int object_size) {
   Heap* heap = map->GetHeap();

   AllocationResult allocation;
-#ifndef V8_HOST_ARCH_64_BIT
+#ifdef V8_HOST_ARCH_32_BIT
   if (alignment == kDoubleAlignment) {
-    allocation = heap->old_space()->AllocateRawDoubleAligned(object_size);
+    allocation =
+        heap->old_space()->AllocateRawAligned(object_size, kDoubleAligned);
   } else {
     allocation = heap->old_space()->AllocateRaw(object_size);
   }
 #else
   allocation = heap->old_space()->AllocateRaw(object_size);
 #endif

   HeapObject* target = NULL;  // Initialization to please compiler.
   if (allocation.To(&target)) {
     MigrateObject(heap, object, target, object_size);
(...skipping 650 matching lines...)
                                           PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate heap numbers in paged
   // spaces.
   int size = HeapNumber::kSize;
   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);

   AllocationSpace space = SelectSpace(size, pretenure);

   HeapObject* result;
   {
-    AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
+    AllocationResult allocation =
+        AllocateRaw(size, space, OLD_SPACE, kDoubleUnaligned);
     if (!allocation.To(&result)) return allocation;
   }

   Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
   HeapObject::cast(result)->set_map_no_write_barrier(map);
   HeapNumber::cast(result)->set_value(value);
   return result;
 }

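The kDoubleUnaligned request in this hunk pairs with the new STATIC_ASSERT near the top of the file: on a 32-bit heap a HeapNumber is a pointer-sized map word followed by an 8-byte double, so the double field is 8-byte aligned exactly when the object itself does not start on an 8-byte boundary. A small stand-alone check of that arithmetic (illustrative constants assuming the 32-bit layout, not V8 code):

// Hypothetical stand-alone program: kValueOffset == 4 assumes the 32-bit
// HeapNumber layout of a 4-byte map word followed by the 8-byte double value.
#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kValueOffset = 4;          // assumed 32-bit layout
  const uintptr_t kDoubleAlignmentMask = 7;  // 8-byte alignment mask

  // An object that starts 4 bytes past an 8-byte boundary (kDoubleUnaligned)
  // puts its double field exactly on an 8-byte boundary.
  uintptr_t unaligned_start = 0x1000 + 4;
  assert(((unaligned_start + kValueOffset) & kDoubleAlignmentMask) == 0);

  // A double-aligned object start would leave the double field misaligned.
  uintptr_t aligned_start = 0x1000;
  assert(((aligned_start + kValueOffset) & kDoubleAlignmentMask) != 0);
  return 0;
}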
(...skipping 3656 matching lines...)
       }
       delete list;
     } else {
       prev = list;
     }
     list = next;
   }
 }
 }
 }  // namespace v8::internal