OLD | NEW |
1 // Copyright 2016 the V8 project authors. All rights reserved. Use of this | 1 // Copyright 2016 the V8 project authors. All rights reserved. Use of this |
2 // source code is governed by a BSD-style license that can be found in the | 2 // source code is governed by a BSD-style license that can be found in the |
3 // LICENSE file. | 3 // LICENSE file. |
4 | 4 |
5 #include <cmath> | 5 #include <cmath> |
6 #include <functional> | 6 #include <functional> |
7 #include <limits> | 7 #include <limits> |
8 | 8 |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/base/utils/random-number-generator.h" | 10 #include "src/base/utils/random-number-generator.h" |
(...skipping 29 matching lines...) |
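// CheckOobValue verifies the value an out-of-bounds checked load produces; | // CheckOobValue verifies the value an out-of-bounds checked load produces; |
// for floating-point types that value is NaN (the generic overloads above, | // for floating-point types that value is NaN (the generic overloads above, |
// elided here, presumably compare against zero). | // elided here, presumably compare against zero). |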
40 template <> | 40 template <> |
41 void CheckOobValue(double val) { | 41 void CheckOobValue(double val) { |
42 CHECK(std::isnan(val)); | 42 CHECK(std::isnan(val)); |
43 } | 43 } |
44 } // namespace | 44 } // namespace |
45 | 45 |
46 namespace v8 { | 46 namespace v8 { |
47 namespace internal { | 47 namespace internal { |
48 namespace compiler { | 48 namespace compiler { |
49 | 49 |
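| // Selects between the aligned Load/Store machine operators and their |
| // Unaligned* counterparts in the test helpers below. |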
| 50 enum TestAlignment { |
| 51 kAligned, |
| 52 kUnaligned, |
| 53 }; |
| 54 |
50 // This is America! | 55 // This is America! |
51 #define A_BILLION 1000000000ULL | 56 #define A_BILLION 1000000000ULL |
52 #define A_GIG (1024ULL * 1024ULL * 1024ULL) | 57 #define A_GIG (1024ULL * 1024ULL * 1024ULL) |
53 | 58 |
54 TEST(RunLoadInt32) { | 59 namespace { |
| 60 void RunLoadInt32(const TestAlignment t) { |
55 RawMachineAssemblerTester<int32_t> m; | 61 RawMachineAssemblerTester<int32_t> m; |
56 | 62 |
57 int32_t p1 = 0; // loads directly from this location. | 63 int32_t p1 = 0; // loads directly from this location. |
58 m.Return(m.LoadFromPointer(&p1, MachineType::Int32())); | 64 |
| 65 if (t == TestAlignment::kAligned) { |
| 66 m.Return(m.LoadFromPointer(&p1, MachineType::Int32())); |
| 67 } else if (t == TestAlignment::kUnaligned) { |
| 68 m.Return(m.UnalignedLoadFromPointer(&p1, MachineType::Int32())); |
| 69 } else { |
| 70 UNREACHABLE(); |
| 71 } |
59 | 72 |
60 FOR_INT32_INPUTS(i) { | 73 FOR_INT32_INPUTS(i) { |
61 p1 = *i; | 74 p1 = *i; |
62 CHECK_EQ(p1, m.Call()); | 75 CHECK_EQ(p1, m.Call()); |
63 } | 76 } |
64 } | 77 } |
65 | 78 |
66 TEST(RunLoadInt32Offset) { | 79 void RunLoadInt32Offset(TestAlignment t) { |
67 int32_t p1 = 0; // loads directly from this location. | 80 int32_t p1 = 0; // loads directly from this location. |
68 | 81 |
69 int32_t offsets[] = {-2000000, -100, -101, 1, 3, | 82 int32_t offsets[] = {-2000000, -100, -101, 1, 3, |
70 7, 120, 2000, 2000000000, 0xff}; | 83 7, 120, 2000, 2000000000, 0xff}; |
71 | 84 |
72 for (size_t i = 0; i < arraysize(offsets); i++) { | 85 for (size_t i = 0; i < arraysize(offsets); i++) { |
73 RawMachineAssemblerTester<int32_t> m; | 86 RawMachineAssemblerTester<int32_t> m; |
74 int32_t offset = offsets[i]; | 87 int32_t offset = offsets[i]; |
75 byte* pointer = reinterpret_cast<byte*>(&p1) - offset; | 88 byte* pointer = reinterpret_cast<byte*>(&p1) - offset; |
| 89 |
76 // generate load [#base + #index] | 90 // generate load [#base + #index] |
77 m.Return(m.LoadFromPointer(pointer, MachineType::Int32(), offset)); | 91 if (t == TestAlignment::kAligned) { |
| 92 m.Return(m.LoadFromPointer(pointer, MachineType::Int32(), offset)); |
| 93 } else if (t == TestAlignment::kUnaligned) { |
| 94 m.Return( |
| 95 m.UnalignedLoadFromPointer(pointer, MachineType::Int32(), offset)); |
| 96 } else { |
| 97 UNREACHABLE(); |
| 98 } |
78 | 99 |
79 FOR_INT32_INPUTS(j) { | 100 FOR_INT32_INPUTS(j) { |
80 p1 = *j; | 101 p1 = *j; |
81 CHECK_EQ(p1, m.Call()); | 102 CHECK_EQ(p1, m.Call()); |
82 } | 103 } |
83 } | 104 } |
84 } | 105 } |
85 | 106 |
86 TEST(RunLoadStoreFloat32Offset) { | 107 void RunLoadStoreFloat32Offset(TestAlignment t) { |
87 float p1 = 0.0f; // loads directly from this location. | 108 float p1 = 0.0f; // loads directly from this location. |
88 float p2 = 0.0f; // and stores directly into this location. | 109 float p2 = 0.0f; // and stores directly into this location. |
89 | 110 |
90 FOR_INT32_INPUTS(i) { | 111 FOR_INT32_INPUTS(i) { |
91 int32_t magic = 0x2342aabb + *i * 3; | 112 int32_t magic = 0x2342aabb + *i * 3; |
92 RawMachineAssemblerTester<int32_t> m; | 113 RawMachineAssemblerTester<int32_t> m; |
93 int32_t offset = *i; | 114 int32_t offset = *i; |
94 byte* from = reinterpret_cast<byte*>(&p1) - offset; | 115 byte* from = reinterpret_cast<byte*>(&p1) - offset; |
95 byte* to = reinterpret_cast<byte*>(&p2) - offset; | 116 byte* to = reinterpret_cast<byte*>(&p2) - offset; |
96 // generate load [#base + #index] | 117 // generate load [#base + #index] |
97 Node* load = m.Load(MachineType::Float32(), m.PointerConstant(from), | 118 if (t == TestAlignment::kAligned) { |
98 m.IntPtrConstant(offset)); | 119 Node* load = m.Load(MachineType::Float32(), m.PointerConstant(from), |
99 m.Store(MachineRepresentation::kFloat32, m.PointerConstant(to), | 120 m.IntPtrConstant(offset)); |
100 m.IntPtrConstant(offset), load, kNoWriteBarrier); | 121 m.Store(MachineRepresentation::kFloat32, m.PointerConstant(to), |
| 122 m.IntPtrConstant(offset), load, kNoWriteBarrier); |
| 123 } else if (t == TestAlignment::kUnaligned) { |
| 124 Node* load = |
| 125 m.UnalignedLoad(MachineType::Float32(), m.PointerConstant(from), |
| 126 m.IntPtrConstant(offset)); |
| 127 m.UnalignedStore(MachineRepresentation::kFloat32, m.PointerConstant(to), |
| 128 m.IntPtrConstant(offset), load); |
| 129 |
| 130 } else { |
| 131 UNREACHABLE(); |
| 132 } |
101 m.Return(m.Int32Constant(magic)); | 133 m.Return(m.Int32Constant(magic)); |
102 | 134 |
103 FOR_FLOAT32_INPUTS(j) { | 135 FOR_FLOAT32_INPUTS(j) { |
104 p1 = *j; | 136 p1 = *j; |
105 p2 = *j - 5; | 137 p2 = *j - 5; |
106 CHECK_EQ(magic, m.Call()); | 138 CHECK_EQ(magic, m.Call()); |
107 CheckDoubleEq(p1, p2); | 139 CheckDoubleEq(p1, p2); |
108 } | 140 } |
109 } | 141 } |
110 } | 142 } |
111 | 143 |
112 TEST(RunLoadStoreFloat64Offset) { | 144 void RunLoadStoreFloat64Offset(TestAlignment t) { |
113 double p1 = 0; // loads directly from this location. | 145 double p1 = 0; // loads directly from this location. |
114 double p2 = 0; // and stores directly into this location. | 146 double p2 = 0; // and stores directly into this location. |
115 | 147 |
116 FOR_INT32_INPUTS(i) { | 148 FOR_INT32_INPUTS(i) { |
117 int32_t magic = 0x2342aabb + *i * 3; | 149 int32_t magic = 0x2342aabb + *i * 3; |
118 RawMachineAssemblerTester<int32_t> m; | 150 RawMachineAssemblerTester<int32_t> m; |
119 int32_t offset = *i; | 151 int32_t offset = *i; |
120 byte* from = reinterpret_cast<byte*>(&p1) - offset; | 152 byte* from = reinterpret_cast<byte*>(&p1) - offset; |
121 byte* to = reinterpret_cast<byte*>(&p2) - offset; | 153 byte* to = reinterpret_cast<byte*>(&p2) - offset; |
122 // generate load [#base + #index] | 154 // generate load [#base + #index] |
123 Node* load = m.Load(MachineType::Float64(), m.PointerConstant(from), | 155 if (t == TestAlignment::kAligned) { |
124 m.IntPtrConstant(offset)); | 156 Node* load = m.Load(MachineType::Float64(), m.PointerConstant(from), |
125 m.Store(MachineRepresentation::kFloat64, m.PointerConstant(to), | 157 m.IntPtrConstant(offset)); |
126 m.IntPtrConstant(offset), load, kNoWriteBarrier); | 158 m.Store(MachineRepresentation::kFloat64, m.PointerConstant(to), |
| 159 m.IntPtrConstant(offset), load, kNoWriteBarrier); |
| 160 } else if (t == TestAlignment::kUnaligned) { |
| 161 Node* load = |
| 162 m.UnalignedLoad(MachineType::Float64(), m.PointerConstant(from), |
| 163 m.IntPtrConstant(offset)); |
| 164 m.UnalignedStore(MachineRepresentation::kFloat64, m.PointerConstant(to), |
| 165 m.IntPtrConstant(offset), load); |
| 166 } else { |
| 167 UNREACHABLE(); |
| 168 } |
127 m.Return(m.Int32Constant(magic)); | 169 m.Return(m.Int32Constant(magic)); |
128 | 170 |
129 FOR_FLOAT64_INPUTS(j) { | 171 FOR_FLOAT64_INPUTS(j) { |
130 p1 = *j; | 172 p1 = *j; |
131 p2 = *j - 5; | 173 p2 = *j - 5; |
132 CHECK_EQ(magic, m.Call()); | 174 CHECK_EQ(magic, m.Call()); |
133 CheckDoubleEq(p1, p2); | 175 CheckDoubleEq(p1, p2); |
134 } | 176 } |
135 } | 177 } |
136 } | 178 } |
| 179 } // namespace |
| 180 |
| 181 TEST(RunLoadInt32) { RunLoadInt32(TestAlignment::kAligned); } |
| 182 |
| 183 TEST(RunUnalignedLoadInt32) { RunLoadInt32(TestAlignment::kUnaligned); } |
| 184 |
| 185 TEST(RunLoadInt32Offset) { RunLoadInt32Offset(TestAlignment::kAligned); } |
| 186 |
| 187 TEST(RunUnalignedLoadInt32Offset) { |
| 188 RunLoadInt32Offset(TestAlignment::kUnaligned); |
| 189 } |
| 190 |
| 191 TEST(RunLoadStoreFloat32Offset) { |
| 192 RunLoadStoreFloat32Offset(TestAlignment::kAligned); |
| 193 } |
| 194 |
| 195 TEST(RunUnalignedLoadStoreFloat32Offset) { |
| 196 RunLoadStoreFloat32Offset(TestAlignment::kUnaligned); |
| 197 } |
| 198 |
| 199 TEST(RunLoadStoreFloat64Offset) { |
| 200 RunLoadStoreFloat64Offset(TestAlignment::kAligned); |
| 201 } |
| 202 |
| 203 TEST(RunUnalignedLoadStoreFloat64Offset) { |
| 204 RunLoadStoreFloat64Offset(TestAlignment::kUnaligned); |
| 205 } |
137 | 206 |
138 namespace { | 207 namespace { |
139 template <typename Type> | 208 template <typename Type> |
140 void RunLoadImmIndex(MachineType rep) { | 209 void RunLoadImmIndex(MachineType rep, TestAlignment t) { |
141 const int kNumElems = 3; | 210 const int kNumElems = 3; |
142 Type buffer[kNumElems]; | 211 Type buffer[kNumElems]; |
143 | 212 |
144 // initialize the buffer with some raw data. | 213 // initialize the buffer with some raw data. |
145 byte* raw = reinterpret_cast<byte*>(buffer); | 214 byte* raw = reinterpret_cast<byte*>(buffer); |
146 for (size_t i = 0; i < sizeof(buffer); i++) { | 215 for (size_t i = 0; i < sizeof(buffer); i++) { |
147 raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA); | 216 raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA); |
148 } | 217 } |
149 | 218 |
150 // Test with various large and small offsets. | 219 // Test with various large and small offsets. |
151 for (int offset = -1; offset <= 200000; offset *= -5) { | 220 for (int offset = -1; offset <= 200000; offset *= -5) { |
152 for (int i = 0; i < kNumElems; i++) { | 221 for (int i = 0; i < kNumElems; i++) { |
153 BufferedRawMachineAssemblerTester<Type> m; | 222 BufferedRawMachineAssemblerTester<Type> m; |
154 Node* base = m.PointerConstant(buffer - offset); | 223 Node* base = m.PointerConstant(buffer - offset); |
155 Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0])); | 224 Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0])); |
156 m.Return(m.Load(rep, base, index)); | 225 if (t == TestAlignment::kAligned) { |
| 226 m.Return(m.Load(rep, base, index)); |
| 227 } else if (t == TestAlignment::kUnaligned) { |
| 228 m.Return(m.UnalignedLoad(rep, base, index)); |
| 229 } else { |
| 230 UNREACHABLE(); |
| 231 } |
157 | 232 |
158 volatile Type expected = buffer[i]; | 233 volatile Type expected = buffer[i]; |
159 volatile Type actual = m.Call(); | 234 volatile Type actual = m.Call(); |
160 CHECK_EQ(expected, actual); | 235 CHECK_EQ(expected, actual); |
161 } | 236 } |
162 } | 237 } |
163 } | 238 } |
164 | 239 |
165 template <typename CType> | 240 template <typename CType> |
166 void RunLoadStore(MachineType rep) { | 241 void RunLoadStore(MachineType rep, TestAlignment t) { |
167 const int kNumElems = 4; | 242 const int kNumElems = 4; |
168 CType buffer[kNumElems]; | 243 CType buffer[kNumElems]; |
169 | 244 |
170 for (int32_t x = 0; x < kNumElems; x++) { | 245 for (int32_t x = 0; x < kNumElems; x++) { |
171 int32_t y = kNumElems - x - 1; | 246 int32_t y = kNumElems - x - 1; |
172 // initialize the buffer with raw data. | 247 // initialize the buffer with raw data. |
173 byte* raw = reinterpret_cast<byte*>(buffer); | 248 byte* raw = reinterpret_cast<byte*>(buffer); |
174 for (size_t i = 0; i < sizeof(buffer); i++) { | 249 for (size_t i = 0; i < sizeof(buffer); i++) { |
175 raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA); | 250 raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA); |
176 } | 251 } |
177 | 252 |
178 RawMachineAssemblerTester<int32_t> m; | 253 RawMachineAssemblerTester<int32_t> m; |
179 int32_t OK = 0x29000 + x; | 254 int32_t OK = 0x29000 + x; |
180 Node* base = m.PointerConstant(buffer); | 255 Node* base = m.PointerConstant(buffer); |
181 Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0])); | 256 Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0])); |
182 Node* load = m.Load(rep, base, index0); | |
183 Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0])); | 257 Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0])); |
184 m.Store(rep.representation(), base, index1, load, kNoWriteBarrier); | 258 if (t == TestAlignment::kAligned) { |
| 259 Node* load = m.Load(rep, base, index0); |
| 260 m.Store(rep.representation(), base, index1, load, kNoWriteBarrier); |
| 261 } else if (t == TestAlignment::kUnaligned) { |
| 262 Node* load = m.UnalignedLoad(rep, base, index0); |
| 263 m.UnalignedStore(rep.representation(), base, index1, load); |
| 264 } else { |
| UNREACHABLE(); |
| } |
| 265 |
185 m.Return(m.Int32Constant(OK)); | 266 m.Return(m.Int32Constant(OK)); |
186 | 267 |
187 CHECK(buffer[x] != buffer[y]); | 268 CHECK(buffer[x] != buffer[y]); |
188 CHECK_EQ(OK, m.Call()); | 269 CHECK_EQ(OK, m.Call()); |
189 CHECK(buffer[x] == buffer[y]); | 270 CHECK(buffer[x] == buffer[y]); |
190 } | 271 } |
191 } | 272 } |
| 273 |
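| // Round-trips a value through byte offsets x and y of two buffers, so both |
| // the UnalignedLoad and the UnalignedStore hit misaligned addresses. |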
| 274 template <typename CType> |
| 275 void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) { |
| 276 CType in, out; |
| 277 CType in_buffer[2]; |
| 278 CType out_buffer[2]; |
| 279 byte* raw; |
| 280 |
| 281 for (int x = 0; x < static_cast<int>(sizeof(CType)); x++) { |
| 282 int y = sizeof(CType) - x; |
| 283 |
| 284 raw = reinterpret_cast<byte*>(&in); |
| 285 for (size_t i = 0; i < sizeof(CType); i++) { |
| 286 raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA); |
| 287 } |
| 288 |
| 289 raw = reinterpret_cast<byte*>(in_buffer); |
| 290 MemCopy(raw + x, &in, sizeof(CType)); |
| 291 |
| 292 RawMachineAssemblerTester<int32_t> m; |
| 293 int32_t OK = 0x29000 + x; |
| 294 |
| 295 Node* base0 = m.PointerConstant(in_buffer); |
| 296 Node* base1 = m.PointerConstant(out_buffer); |
| 297 Node* index0 = m.IntPtrConstant(x); |
| 298 Node* index1 = m.IntPtrConstant(y); |
| 299 Node* load = m.UnalignedLoad(rep, base0, index0); |
| 300 m.UnalignedStore(rep.representation(), base1, index1, load); |
| 301 |
| 302 m.Return(m.Int32Constant(OK)); |
| 303 |
| 304 CHECK_EQ(OK, m.Call()); |
| 305 |
| 306 raw = reinterpret_cast<byte*>(out_buffer); |
| 307 MemCopy(&out, raw + y, sizeof(CType)); |
| 308 CHECK(in == out); |
| 309 } |
| 310 } |
192 } // namespace | 311 } // namespace |
193 | 312 |
194 TEST(RunLoadImmIndex) { | 313 TEST(RunLoadImmIndex) { |
195 RunLoadImmIndex<int8_t>(MachineType::Int8()); | 314 RunLoadImmIndex<int8_t>(MachineType::Int8(), TestAlignment::kAligned); |
196 RunLoadImmIndex<uint8_t>(MachineType::Uint8()); | 315 RunLoadImmIndex<uint8_t>(MachineType::Uint8(), TestAlignment::kAligned); |
197 RunLoadImmIndex<int16_t>(MachineType::Int16()); | 316 RunLoadImmIndex<int16_t>(MachineType::Int16(), TestAlignment::kAligned); |
198 RunLoadImmIndex<uint16_t>(MachineType::Uint16()); | 317 RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned); |
199 RunLoadImmIndex<int32_t>(MachineType::Int32()); | 318 RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kAligned); |
200 RunLoadImmIndex<uint32_t>(MachineType::Uint32()); | 319 RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned); |
201 RunLoadImmIndex<int32_t*>(MachineType::AnyTagged()); | 320 RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(), TestAlignment::kAligned); |
202 RunLoadImmIndex<float>(MachineType::Float32()); | 321 RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kAligned); |
203 RunLoadImmIndex<double>(MachineType::Float64()); | 322 RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kAligned); |
204 #if V8_TARGET_ARCH_64_BIT | 323 #if V8_TARGET_ARCH_64_BIT |
205 RunLoadImmIndex<int64_t>(MachineType::Int64()); | 324 RunLoadImmIndex<int64_t>(MachineType::Int64(), TestAlignment::kAligned); |
| 325 #endif |
| 326 // TODO(titzer): test various indexing modes. |
| 327 } |
| 328 |
| 329 TEST(RunUnalignedLoadImmIndex) { |
| 330 RunLoadImmIndex<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned); |
| 331 RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned); |
| 332 RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned); |
| 333 RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned); |
| 334 RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(), |
| 335 TestAlignment::kUnaligned); |
| 336 RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kUnaligned); |
| 337 RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kUnaligned); |
| 338 #if V8_TARGET_ARCH_64_BIT |
| 339 RunLoadImmIndex<int64_t>(MachineType::Int64(), TestAlignment::kUnaligned); |
206 #endif | 340 #endif |
207 // TODO(titzer): test various indexing modes. | 341 // TODO(titzer): test various indexing modes. |
208 } | 342 } |
209 | 343 |
210 TEST(RunLoadStore) { | 344 TEST(RunLoadStore) { |
211 RunLoadStore<int8_t>(MachineType::Int8()); | 345 RunLoadStore<int8_t>(MachineType::Int8(), TestAlignment::kAligned); |
212 RunLoadStore<uint8_t>(MachineType::Uint8()); | 346 RunLoadStore<uint8_t>(MachineType::Uint8(), TestAlignment::kAligned); |
213 RunLoadStore<int16_t>(MachineType::Int16()); | 347 RunLoadStore<int16_t>(MachineType::Int16(), TestAlignment::kAligned); |
214 RunLoadStore<uint16_t>(MachineType::Uint16()); | 348 RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned); |
215 RunLoadStore<int32_t>(MachineType::Int32()); | 349 RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kAligned); |
216 RunLoadStore<uint32_t>(MachineType::Uint32()); | 350 RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned); |
217 RunLoadStore<void*>(MachineType::AnyTagged()); | 351 RunLoadStore<void*>(MachineType::AnyTagged(), TestAlignment::kAligned); |
218 RunLoadStore<float>(MachineType::Float32()); | 352 RunLoadStore<float>(MachineType::Float32(), TestAlignment::kAligned); |
219 RunLoadStore<double>(MachineType::Float64()); | 353 RunLoadStore<double>(MachineType::Float64(), TestAlignment::kAligned); |
220 #if V8_TARGET_ARCH_64_BIT | 354 #if V8_TARGET_ARCH_64_BIT |
221 RunLoadStore<int64_t>(MachineType::Int64()); | 355 RunLoadStore<int64_t>(MachineType::Int64(), TestAlignment::kAligned); |
222 #endif | 356 #endif |
223 } | 357 } |
224 | 358 |
| 359 TEST(RunUnalignedLoadStore) { |
| 360 RunLoadStore<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned); |
| 361 RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned); |
| 362 RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned); |
| 363 RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned); |
| 364 RunLoadStore<void*>(MachineType::AnyTagged(), TestAlignment::kUnaligned); |
| 365 RunLoadStore<float>(MachineType::Float32(), TestAlignment::kUnaligned); |
| 366 RunLoadStore<double>(MachineType::Float64(), TestAlignment::kUnaligned); |
| 367 #if V8_TARGET_ARCH_64_BIT |
| 368 RunLoadStore<int64_t>(MachineType::Int64(), TestAlignment::kUnaligned); |
| 369 #endif |
| 370 } |
| 371 |
| 372 TEST(RunUnalignedLoadStoreUnalignedAccess) { |
| 373 RunUnalignedLoadStoreUnalignedAccess<int16_t>(MachineType::Int16()); |
| 374 RunUnalignedLoadStoreUnalignedAccess<uint16_t>(MachineType::Uint16()); |
| 375 RunUnalignedLoadStoreUnalignedAccess<int32_t>(MachineType::Int32()); |
| 376 RunUnalignedLoadStoreUnalignedAccess<uint32_t>(MachineType::Uint32()); |
| 377 RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::AnyTagged()); |
| 378 RunUnalignedLoadStoreUnalignedAccess<float>(MachineType::Float32()); |
| 379 RunUnalignedLoadStoreUnalignedAccess<double>(MachineType::Float64()); |
| 380 #if V8_TARGET_ARCH_64_BIT |
| 381 RunUnalignedLoadStoreUnalignedAccess<int64_t>(MachineType::Int64()); |
| 382 #endif |
| 383 } |
| 384 |
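// LSB(addr, bytes) yields the address of the least significant 'bytes' of | // LSB(addr, bytes) yields the address of the least significant 'bytes' of |
// the value at 'addr': the start on little-endian targets and the tail on | // the value at 'addr': the start on little-endian targets and the tail on |
// big-endian ones, so narrow loads read the low bits on either layout. | // big-endian ones, so narrow loads read the low bits on either layout. |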
225 #if V8_TARGET_LITTLE_ENDIAN | 385 #if V8_TARGET_LITTLE_ENDIAN |
226 #define LSB(addr, bytes) addr | 386 #define LSB(addr, bytes) addr |
227 #elif V8_TARGET_BIG_ENDIAN | 387 #elif V8_TARGET_BIG_ENDIAN |
228 #define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - bytes | 388 #define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - bytes |
229 #else | 389 #else |
230 #error "Unknown Architecture" | 390 #error "Unknown Architecture" |
231 #endif | 391 #endif |
232 | 392 |
233 TEST(RunLoadStoreSignExtend32) { | 393 namespace { |
| 394 void RunLoadStoreSignExtend32(TestAlignment t) { |
234 int32_t buffer[4]; | 395 int32_t buffer[4]; |
235 RawMachineAssemblerTester<int32_t> m; | 396 RawMachineAssemblerTester<int32_t> m; |
236 Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8()); | 397 Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8()); |
237 Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16()); | 398 if (t == TestAlignment::kAligned) { |
238 Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Int32()); | 399 Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16()); |
239 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8); | 400 Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Int32()); |
240 m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16); | 401 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8); |
241 m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32); | 402 m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16); |
| 403 m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32); |
| 404 } else if (t == TestAlignment::kUnaligned) { |
| 405 Node* load16 = |
| 406 m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16()); |
| 407 Node* load32 = |
| m.UnalignedLoadFromPointer(&buffer[0], MachineType::Int32()); |
| 408 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8); |
| 409 m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord32, |
| 410 load16); |
| 411 m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord32, |
| 412 load32); |
| 413 } else { |
| 414 UNREACHABLE(); |
| 415 } |
242 m.Return(load8); | 416 m.Return(load8); |
243 | 417 |
244 FOR_INT32_INPUTS(i) { | 418 FOR_INT32_INPUTS(i) { |
245 buffer[0] = *i; | 419 buffer[0] = *i; |
246 | 420 |
247 CHECK_EQ(static_cast<int8_t>(*i & 0xff), m.Call()); | 421 CHECK_EQ(static_cast<int8_t>(*i & 0xff), m.Call()); |
248 CHECK_EQ(static_cast<int8_t>(*i & 0xff), buffer[1]); | 422 CHECK_EQ(static_cast<int8_t>(*i & 0xff), buffer[1]); |
249 CHECK_EQ(static_cast<int16_t>(*i & 0xffff), buffer[2]); | 423 CHECK_EQ(static_cast<int16_t>(*i & 0xffff), buffer[2]); |
250 CHECK_EQ(*i, buffer[3]); | 424 CHECK_EQ(*i, buffer[3]); |
251 } | 425 } |
252 } | 426 } |
253 | 427 |
254 TEST(RunLoadStoreZeroExtend32) { | 428 void RunLoadStoreZeroExtend32(TestAlignment t) { |
255 uint32_t buffer[4]; | 429 uint32_t buffer[4]; |
256 RawMachineAssemblerTester<uint32_t> m; | 430 RawMachineAssemblerTester<uint32_t> m; |
257 Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8()); | 431 Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8()); |
258 Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16()); | 432 if (t == TestAlignment::kAligned) { |
259 Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Uint32()); | 433 Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16()); |
260 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8); | 434 Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Uint32()); |
261 m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16); | 435 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8); |
262 m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32); | 436 m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16); |
| 437 m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32); |
| 438 } else if (t == TestAlignment::kUnaligned) { |
| 439 Node* load16 = |
| 440 m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16()); |
| 441 Node* load32 = |
| 442 m.UnalignedLoadFromPointer(&buffer[0], MachineType::Uint32()); |
| 443 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8); |
| 444 m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord32, |
| 445 load16); |
| 446 m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord32, |
| 447 load32); |
| 448 } else { |
| UNREACHABLE(); |
| } |
263 m.Return(load8); | 449 m.Return(load8); |
264 | 450 |
265 FOR_UINT32_INPUTS(i) { | 451 FOR_UINT32_INPUTS(i) { |
266 buffer[0] = *i; | 452 buffer[0] = *i; |
267 | 453 |
268 CHECK_EQ((*i & 0xff), m.Call()); | 454 CHECK_EQ((*i & 0xff), m.Call()); |
269 CHECK_EQ((*i & 0xff), buffer[1]); | 455 CHECK_EQ((*i & 0xff), buffer[1]); |
270 CHECK_EQ((*i & 0xffff), buffer[2]); | 456 CHECK_EQ((*i & 0xffff), buffer[2]); |
271 CHECK_EQ(*i, buffer[3]); | 457 CHECK_EQ(*i, buffer[3]); |
272 } | 458 } |
273 } | 459 } |
| 460 } // namespace |
| 461 |
| 462 TEST(RunLoadStoreSignExtend32) { |
| 463 RunLoadStoreSignExtend32(TestAlignment::kAligned); |
| 464 } |
| 465 |
| 466 TEST(RunUnalignedLoadStoreSignExtend32) { |
| 467 RunLoadStoreSignExtend32(TestAlignment::kUnaligned); |
| 468 } |
| 469 |
| 470 TEST(RunLoadStoreZeroExtend32) { |
| 471 RunLoadStoreZeroExtend32(TestAlignment::kAligned); |
| 472 } |
| 473 |
| 474 TEST(RunUnalignedLoadStoreZeroExtend32) { |
| 475 RunLoadStoreZeroExtend32(TestAlignment::kUnaligned); |
| 476 } |
274 | 477 |
275 #if V8_TARGET_ARCH_64_BIT | 478 #if V8_TARGET_ARCH_64_BIT |
276 TEST(RunCheckedLoadInt64) { | |
277 int64_t buffer[] = {0x66bbccddeeff0011LL, 0x1122334455667788LL}; | |
278 RawMachineAssemblerTester<int64_t> m(MachineType::Int32()); | |
279 Node* base = m.PointerConstant(buffer); | |
280 Node* index = m.Parameter(0); | |
281 Node* length = m.Int32Constant(16); | |
282 Node* load = m.AddNode(m.machine()->CheckedLoad(MachineType::Int64()), base, | |
283 index, length); | |
284 m.Return(load); | |
285 | 479 |
286 CHECK_EQ(buffer[0], m.Call(0)); | 480 namespace { |
287 CHECK_EQ(buffer[1], m.Call(8)); | 481 void RunLoadStoreSignExtend64(TestAlignment t) { |
288 CheckOobValue(m.Call(16)); | |
289 } | |
290 | |
291 TEST(RunLoadStoreSignExtend64) { | |
292 if (true) return; // TODO(titzer): sign extension of loads to 64-bit. | 482 if (true) return; // TODO(titzer): sign extension of loads to 64-bit. |
293 int64_t buffer[5]; | 483 int64_t buffer[5]; |
294 RawMachineAssemblerTester<int64_t> m; | 484 RawMachineAssemblerTester<int64_t> m; |
295 Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8()); | 485 Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8()); |
296 Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16()); | 486 if (t == TestAlignment::kAligned) { |
297 Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32()); | 487 Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16()); |
298 Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Int64()); | 488 Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32()); |
299 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8); | 489 Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Int64()); |
300 m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16); | 490 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8); |
301 m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32); | 491 m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16); |
302 m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64); | 492 m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32); |
| 493 m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64); |
| 494 } else if (t == TestAlignment::kUnaligned) { |
| 495 Node* load16 = |
| 496 m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16()); |
| 497 Node* load32 = |
| 498 m.UnalignedLoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32()); |
| 499 Node* load64 = |
| m.UnalignedLoadFromPointer(&buffer[0], MachineType::Int64()); |
| 500 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8); |
| 501 m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord64, |
| 502 load16); |
| 503 m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord64, |
| 504 load32); |
| 505 m.UnalignedStoreToPointer(&buffer[4], MachineRepresentation::kWord64, |
| 506 load64); |
| 507 } else { |
| 508 UNREACHABLE(); |
| 509 } |
303 m.Return(load8); | 510 m.Return(load8); |
304 | 511 |
305 FOR_INT64_INPUTS(i) { | 512 FOR_INT64_INPUTS(i) { |
306 buffer[0] = *i; | 513 buffer[0] = *i; |
307 | 514 |
308 CHECK_EQ(static_cast<int8_t>(*i & 0xff), m.Call()); | 515 CHECK_EQ(static_cast<int8_t>(*i & 0xff), m.Call()); |
309 CHECK_EQ(static_cast<int8_t>(*i & 0xff), buffer[1]); | 516 CHECK_EQ(static_cast<int8_t>(*i & 0xff), buffer[1]); |
310 CHECK_EQ(static_cast<int16_t>(*i & 0xffff), buffer[2]); | 517 CHECK_EQ(static_cast<int16_t>(*i & 0xffff), buffer[2]); |
311 CHECK_EQ(static_cast<int32_t>(*i & 0xffffffff), buffer[3]); | 518 CHECK_EQ(static_cast<int32_t>(*i & 0xffffffff), buffer[3]); |
312 CHECK_EQ(*i, buffer[4]); | 519 CHECK_EQ(*i, buffer[4]); |
313 } | 520 } |
314 } | 521 } |
315 | 522 |
316 TEST(RunLoadStoreZeroExtend64) { | 523 void RunLoadStoreZeroExtend64(TestAlignment t) { |
317 if (kPointerSize < 8) return; | 524 if (kPointerSize < 8) return; |
318 uint64_t buffer[5]; | 525 uint64_t buffer[5]; |
319 RawMachineAssemblerTester<int64_t> m; | 526 RawMachineAssemblerTester<int64_t> m; |
320 Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8()); | 527 Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8()); |
321 Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16()); | 528 if (t == TestAlignment::kAligned) { |
322 Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32()); | 529 Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16()); |
323 Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Uint64()); | 530 Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32()); |
324 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8); | 531 Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Uint64()); |
325 m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16); | 532 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8); |
326 m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32); | 533 m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16); |
327 m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64); | 534 m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32); |
| 535 m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64); |
| 536 } else if (t == TestAlignment::kUnaligned) { |
| 537 Node* load16 = |
| 538 m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16()); |
| 539 Node* load32 = |
| 540 m.UnalignedLoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32()); |
| 541 Node* load64 = |
| 542 m.UnalignedLoadFromPointer(&buffer[0], MachineType::Uint64()); |
| 543 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8); |
| 544 m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord64, |
| 545 load16); |
| 546 m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord64, |
| 547 load32); |
| 548 m.UnalignedStoreToPointer(&buffer[4], MachineRepresentation::kWord64, |
| 549 load64); |
| 550 } else { |
| 551 UNREACHABLE(); |
| 552 } |
328 m.Return(load8); | 553 m.Return(load8); |
329 | 554 |
330 FOR_UINT64_INPUTS(i) { | 555 FOR_UINT64_INPUTS(i) { |
331 buffer[0] = *i; | 556 buffer[0] = *i; |
332 | 557 |
333 CHECK_EQ((*i & 0xff), m.Call()); | 558 CHECK_EQ((*i & 0xff), m.Call()); |
334 CHECK_EQ((*i & 0xff), buffer[1]); | 559 CHECK_EQ((*i & 0xff), buffer[1]); |
335 CHECK_EQ((*i & 0xffff), buffer[2]); | 560 CHECK_EQ((*i & 0xffff), buffer[2]); |
336 CHECK_EQ((*i & 0xffffffff), buffer[3]); | 561 CHECK_EQ((*i & 0xffffffff), buffer[3]); |
337 CHECK_EQ(*i, buffer[4]); | 562 CHECK_EQ(*i, buffer[4]); |
338 } | 563 } |
339 } | 564 } |
340 | 565 |
| 566 } // namespace |
| 567 |
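| // An index at or past 'length' (16 bytes here) must not fault; the checked |
| // load yields the out-of-bounds value that CheckOobValue expects. |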
| 568 TEST(RunCheckedLoadInt64) { |
| 569 int64_t buffer[] = {0x66bbccddeeff0011LL, 0x1122334455667788LL}; |
| 570 RawMachineAssemblerTester<int64_t> m(MachineType::Int32()); |
| 571 Node* base = m.PointerConstant(buffer); |
| 572 Node* index = m.Parameter(0); |
| 573 Node* length = m.Int32Constant(16); |
| 574 Node* load = m.AddNode(m.machine()->CheckedLoad(MachineType::Int64()), base, |
| 575 index, length); |
| 576 m.Return(load); |
| 577 |
| 578 CHECK_EQ(buffer[0], m.Call(0)); |
| 579 CHECK_EQ(buffer[1], m.Call(8)); |
| 580 CheckOobValue(m.Call(16)); |
| 581 } |
| 582 |
| 583 TEST(RunLoadStoreSignExtend64) { |
| 584 RunLoadStoreSignExtend64(TestAlignment::kAligned); |
| 585 } |
| 586 |
| 587 TEST(RunUnalignedLoadStoreSignExtend64) { |
| 588 RunLoadStoreSignExtend64(TestAlignment::kUnaligned); |
| 589 } |
| 590 |
| 591 TEST(RunLoadStoreZeroExtend64) { |
| 592 RunLoadStoreZeroExtend64(TestAlignment::kAligned); |
| 593 } |
| 594 |
| 595 TEST(RunUnalignedLoadStoreZeroExtend64) { |
| 596 RunLoadStoreZeroExtend64(TestAlignment::kUnaligned); |
| 597 } |
| 598 |
341 TEST(RunCheckedStoreInt64) { | 599 TEST(RunCheckedStoreInt64) { |
342 const int64_t write = 0x5566778899aabbLL; | 600 const int64_t write = 0x5566778899aabbLL; |
343 const int64_t before = 0x33bbccddeeff0011LL; | 601 const int64_t before = 0x33bbccddeeff0011LL; |
344 int64_t buffer[] = {before, before}; | 602 int64_t buffer[] = {before, before}; |
345 RawMachineAssemblerTester<int32_t> m(MachineType::Int32()); | 603 RawMachineAssemblerTester<int32_t> m(MachineType::Int32()); |
346 Node* base = m.PointerConstant(buffer); | 604 Node* base = m.PointerConstant(buffer); |
347 Node* index = m.Parameter(0); | 605 Node* index = m.Parameter(0); |
348 Node* length = m.Int32Constant(16); | 606 Node* length = m.Int32Constant(16); |
349 Node* value = m.Int64Constant(write); | 607 Node* value = m.Int64Constant(write); |
350 Node* store = | 608 Node* store = |
(...skipping 11 matching lines...) |
362 CHECK_EQ(before, buffer[1]); | 620 CHECK_EQ(before, buffer[1]); |
363 | 621 |
364 CHECK_EQ(11, m.Call(8)); | 622 CHECK_EQ(11, m.Call(8)); |
365 CHECK_EQ(write, buffer[0]); | 623 CHECK_EQ(write, buffer[0]); |
366 CHECK_EQ(write, buffer[1]); | 624 CHECK_EQ(write, buffer[1]); |
367 } | 625 } |
368 #endif | 626 #endif |
369 | 627 |
370 namespace { | 628 namespace { |
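// Loads a narrow integer, adds 1 in full 32-bit arithmetic, stores it back, | // Loads a narrow integer, adds 1 in full 32-bit arithmetic, stores it back, |
// and checks that the store truncated the result to the narrow type. | // and checks that the store truncated the result to the narrow type. |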
371 template <typename IntType> | 629 template <typename IntType> |
372 void LoadStoreTruncation(MachineType kRepresentation) { | 630 void LoadStoreTruncation(MachineType kRepresentation, TestAlignment t) { |
373 IntType input; | 631 IntType input; |
374 | 632 |
375 RawMachineAssemblerTester<int32_t> m; | 633 RawMachineAssemblerTester<int32_t> m; |
376 Node* a = m.LoadFromPointer(&input, kRepresentation); | 634 Node* ap1; |
377 Node* ap1 = m.Int32Add(a, m.Int32Constant(1)); | 635 if (t == TestAlignment::kAligned) { |
378 m.StoreToPointer(&input, kRepresentation.representation(), ap1); | 636 Node* a = m.LoadFromPointer(&input, kRepresentation); |
| 637 ap1 = m.Int32Add(a, m.Int32Constant(1)); |
| 638 m.StoreToPointer(&input, kRepresentation.representation(), ap1); |
| 639 } else if (t == TestAlignment::kUnaligned) { |
| 640 Node* a = m.UnalignedLoadFromPointer(&input, kRepresentation); |
| 641 ap1 = m.Int32Add(a, m.Int32Constant(1)); |
| 642 m.UnalignedStoreToPointer(&input, kRepresentation.representation(), ap1); |
| 643 } else { |
| 644 UNREACHABLE(); |
| 645 } |
379 m.Return(ap1); | 646 m.Return(ap1); |
380 | 647 |
381 const IntType max = std::numeric_limits<IntType>::max(); | 648 const IntType max = std::numeric_limits<IntType>::max(); |
382 const IntType min = std::numeric_limits<IntType>::min(); | 649 const IntType min = std::numeric_limits<IntType>::min(); |
383 | 650 |
384 // Test upper bound. | 651 // Test upper bound. |
385 input = max; | 652 input = max; |
386 CHECK_EQ(max + 1, m.Call()); | 653 CHECK_EQ(max + 1, m.Call()); |
387 CHECK_EQ(min, input); | 654 CHECK_EQ(min, input); |
388 | 655 |
389 // Test lower bound. | 656 // Test lower bound. |
390 input = min; | 657 input = min; |
391 CHECK_EQ(static_cast<IntType>(max + 2), m.Call()); | 658 CHECK_EQ(static_cast<IntType>(max + 2), m.Call()); |
392 CHECK_EQ(min + 1, input); | 659 CHECK_EQ(min + 1, input); |
393 | 660 |
394 // Test all one byte values that are not one byte bounds. | 661 // Test all one byte values that are not one byte bounds. |
395 for (int i = -127; i < 127; i++) { | 662 for (int i = -127; i < 127; i++) { |
396 input = i; | 663 input = i; |
397 int expected = i >= 0 ? i + 1 : max + (i - min) + 2; | 664 int expected = i >= 0 ? i + 1 : max + (i - min) + 2; |
398 CHECK_EQ(static_cast<IntType>(expected), m.Call()); | 665 CHECK_EQ(static_cast<IntType>(expected), m.Call()); |
399 CHECK_EQ(static_cast<IntType>(i + 1), input); | 666 CHECK_EQ(static_cast<IntType>(i + 1), input); |
400 } | 667 } |
401 } | 668 } |
402 } // namespace | 669 } // namespace |
403 | 670 |
404 TEST(RunLoadStoreTruncation) { | 671 TEST(RunLoadStoreTruncation) { |
405 LoadStoreTruncation<int8_t>(MachineType::Int8()); | 672 LoadStoreTruncation<int8_t>(MachineType::Int8(), TestAlignment::kAligned); |
406 LoadStoreTruncation<int16_t>(MachineType::Int16()); | 673 LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kAligned); |
| 674 } |
| 675 |
| 676 TEST(RunUnalignedLoadStoreTruncation) { |
| 677 LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned); |
407 } | 678 } |
408 | 679 |
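// Checked loads with an out-of-bounds index must return the OOB value | // Checked loads with an out-of-bounds index must return the OOB value |
// instead of reading past 'length'; USE() keeps the CheckOobValue | // instead of reading past 'length'; USE() keeps the CheckOobValue |
// instantiations from tripping unused-function warnings. | // instantiations from tripping unused-function warnings. |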
409 void TestRunOobCheckedLoad(bool length_is_immediate) { | 680 void TestRunOobCheckedLoad(bool length_is_immediate) { |
410 USE(CheckOobValue<int32_t>); | 681 USE(CheckOobValue<int32_t>); |
411 USE(CheckOobValue<int64_t>); | 682 USE(CheckOobValue<int64_t>); |
412 USE(CheckOobValue<float>); | 683 USE(CheckOobValue<float>); |
413 USE(CheckOobValue<double>); | 684 USE(CheckOobValue<double>); |
414 | 685 |
415 RawMachineAssemblerTester<int32_t> m(MachineType::Int32(), | 686 RawMachineAssemblerTester<int32_t> m(MachineType::Int32(), |
416 MachineType::Int32()); | 687 MachineType::Int32()); |
(...skipping 493 matching lines...) |
910 TestRunOobCheckedLoadT_pseudo<int32_t>(4 * A_BILLION, true); | 1181 TestRunOobCheckedLoadT_pseudo<int32_t>(4 * A_BILLION, true); |
911 TestRunOobCheckedLoadT_pseudo<float>(4 * A_BILLION, false); | 1182 TestRunOobCheckedLoadT_pseudo<float>(4 * A_BILLION, false); |
912 TestRunOobCheckedLoadT_pseudo<float>(4 * A_BILLION, true); | 1183 TestRunOobCheckedLoadT_pseudo<float>(4 * A_BILLION, true); |
913 TestRunOobCheckedLoadT_pseudo<double>(4 * A_BILLION, false); | 1184 TestRunOobCheckedLoadT_pseudo<double>(4 * A_BILLION, false); |
914 TestRunOobCheckedLoadT_pseudo<double>(4 * A_BILLION, true); | 1185 TestRunOobCheckedLoadT_pseudo<double>(4 * A_BILLION, true); |
915 } | 1186 } |
916 | 1187 |
917 } // namespace compiler | 1188 } // namespace compiler |
918 } // namespace internal | 1189 } // namespace internal |
919 } // namespace v8 | 1190 } // namespace v8 |