Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(5)

Side by Side Diff: test/cctest/compiler/test-run-load-store.cc

Issue 2122853002: Implement UnalignedLoad and UnalignedStore turbofan operators. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Add UnalignedLoad and UnalignedStore tests Created 4 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/machine-type.h ('k') | test/unittests/compiler/int64-lowering-unittest.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2016 the V8 project authors. All rights reserved. Use of this 1 // Copyright 2016 the V8 project authors. All rights reserved. Use of this
2 // source code is governed by a BSD-style license that can be found in the 2 // source code is governed by a BSD-style license that can be found in the
3 // LICENSE file. 3 // LICENSE file.
4 4
5 #include <cmath> 5 #include <cmath>
6 #include <functional> 6 #include <functional>
7 #include <limits> 7 #include <limits>
8 8
9 #include "src/base/bits.h" 9 #include "src/base/bits.h"
10 #include "src/base/utils/random-number-generator.h" 10 #include "src/base/utils/random-number-generator.h"
(...skipping 29 matching lines...) Expand all
40 template <> 40 template <>
41 void CheckOobValue(double val) { 41 void CheckOobValue(double val) {
42 CHECK(std::isnan(val)); 42 CHECK(std::isnan(val));
43 } 43 }
44 } // namespace 44 } // namespace
45 45
46 namespace v8 { 46 namespace v8 {
47 namespace internal { 47 namespace internal {
48 namespace compiler { 48 namespace compiler {
49 49
// Selects whether a test exercises the ordinary (aligned) Load/Store
// operators or the UnalignedLoad/UnalignedStore operators.
// TODO(review, titzer): consider renaming to TestAlignment { kAligned,
// kUnaligned }.
enum LoadStoreKind {
  kLoadStore,
  kUnalignedLoadStore,
};
54
// This is a America!
// Large round constants used by the memory-size tests below.
#define A_BILLION 1000000000ULL
#define A_GIG (1024ULL * 1024ULL * 1024ULL)
53 58
54 TEST(RunLoadInt32) { 59 namespace {
60 void RunLoadInt32(const LoadStoreKind t) {
55 RawMachineAssemblerTester<int32_t> m; 61 RawMachineAssemblerTester<int32_t> m;
56 62
57 int32_t p1 = 0; // loads directly from this location. 63 int32_t p1 = 0; // loads directly from this location.
58 m.Return(m.LoadFromPointer(&p1, MachineType::Int32())); 64
65 if (t == LoadStoreKind::kLoadStore) {
66 m.Return(m.LoadFromPointer(&p1, MachineType::Int32()));
67 } else if (t == LoadStoreKind::kUnalignedLoadStore) {
68 m.Return(m.UnalignedLoadFromPointer(&p1, MachineType::Int32()));
69 } else {
70 UNREACHABLE();
71 }
59 72
60 FOR_INT32_INPUTS(i) { 73 FOR_INT32_INPUTS(i) {
61 p1 = *i; 74 p1 = *i;
62 CHECK_EQ(p1, m.Call()); 75 CHECK_EQ(p1, m.Call());
63 } 76 }
64 } 77 }
65 78
66 TEST(RunLoadInt32Offset) { 79 void RunLoadInt32Offset(LoadStoreKind t) {
67 int32_t p1 = 0; // loads directly from this location. 80 int32_t p1 = 0; // loads directly from this location.
68 81
69 int32_t offsets[] = {-2000000, -100, -101, 1, 3, 82 int32_t offsets[] = {-2000000, -100, -101, 1, 3,
70 7, 120, 2000, 2000000000, 0xff}; 83 7, 120, 2000, 2000000000, 0xff};
71 84
72 for (size_t i = 0; i < arraysize(offsets); i++) { 85 for (size_t i = 0; i < arraysize(offsets); i++) {
73 RawMachineAssemblerTester<int32_t> m; 86 RawMachineAssemblerTester<int32_t> m;
74 int32_t offset = offsets[i]; 87 int32_t offset = offsets[i];
75 byte* pointer = reinterpret_cast<byte*>(&p1) - offset; 88 byte* pointer = reinterpret_cast<byte*>(&p1) - offset;
89
76 // generate load [#base + #index] 90 // generate load [#base + #index]
77 m.Return(m.LoadFromPointer(pointer, MachineType::Int32(), offset)); 91 if (t == LoadStoreKind::kLoadStore) {
92 m.Return(m.LoadFromPointer(pointer, MachineType::Int32(), offset));
93 } else if (t == LoadStoreKind::kUnalignedLoadStore) {
94 m.Return(
95 m.UnalignedLoadFromPointer(pointer, MachineType::Int32(), offset));
96 } else {
97 UNREACHABLE();
98 }
78 99
79 FOR_INT32_INPUTS(j) { 100 FOR_INT32_INPUTS(j) {
80 p1 = *j; 101 p1 = *j;
81 CHECK_EQ(p1, m.Call()); 102 CHECK_EQ(p1, m.Call());
82 } 103 }
83 } 104 }
84 } 105 }
85 106
86 TEST(RunLoadStoreFloat32Offset) { 107 void RunLoadStoreFloat32Offset(LoadStoreKind t) {
87 float p1 = 0.0f; // loads directly from this location. 108 float p1 = 0.0f; // loads directly from this location.
88 float p2 = 0.0f; // and stores directly into this location. 109 float p2 = 0.0f; // and stores directly into this location.
89 110
90 FOR_INT32_INPUTS(i) { 111 FOR_INT32_INPUTS(i) {
91 int32_t magic = 0x2342aabb + *i * 3; 112 int32_t magic = 0x2342aabb + *i * 3;
92 RawMachineAssemblerTester<int32_t> m; 113 RawMachineAssemblerTester<int32_t> m;
93 int32_t offset = *i; 114 int32_t offset = *i;
94 byte* from = reinterpret_cast<byte*>(&p1) - offset; 115 byte* from = reinterpret_cast<byte*>(&p1) - offset;
95 byte* to = reinterpret_cast<byte*>(&p2) - offset; 116 byte* to = reinterpret_cast<byte*>(&p2) - offset;
96 // generate load [#base + #index] 117 // generate load [#base + #index]
97 Node* load = m.Load(MachineType::Float32(), m.PointerConstant(from), 118 if (t == LoadStoreKind::kLoadStore) {
98 m.IntPtrConstant(offset)); 119 Node* load = m.Load(MachineType::Float32(), m.PointerConstant(from),
99 m.Store(MachineRepresentation::kFloat32, m.PointerConstant(to), 120 m.IntPtrConstant(offset));
100 m.IntPtrConstant(offset), load, kNoWriteBarrier); 121 m.Store(MachineRepresentation::kFloat32, m.PointerConstant(to),
122 m.IntPtrConstant(offset), load, kNoWriteBarrier);
123 } else if (t == LoadStoreKind::kUnalignedLoadStore) {
124 Node* load =
125 m.UnalignedLoad(MachineType::Float32(), m.PointerConstant(from),
126 m.IntPtrConstant(offset));
127 m.UnalignedStore(MachineRepresentation::kFloat32, m.PointerConstant(to),
128 m.IntPtrConstant(offset), load);
129
130 } else {
131 UNREACHABLE();
132 }
101 m.Return(m.Int32Constant(magic)); 133 m.Return(m.Int32Constant(magic));
102 134
103 FOR_FLOAT32_INPUTS(j) { 135 FOR_FLOAT32_INPUTS(j) {
104 p1 = *j; 136 p1 = *j;
105 p2 = *j - 5; 137 p2 = *j - 5;
106 CHECK_EQ(magic, m.Call()); 138 CHECK_EQ(magic, m.Call());
107 CheckDoubleEq(p1, p2); 139 CheckDoubleEq(p1, p2);
108 } 140 }
109 } 141 }
110 } 142 }
111 143
112 TEST(RunLoadStoreFloat64Offset) { 144 void RunLoadStoreFloat64Offset(LoadStoreKind t) {
113 double p1 = 0; // loads directly from this location. 145 double p1 = 0; // loads directly from this location.
114 double p2 = 0; // and stores directly into this location. 146 double p2 = 0; // and stores directly into this location.
115 147
116 FOR_INT32_INPUTS(i) { 148 FOR_INT32_INPUTS(i) {
117 int32_t magic = 0x2342aabb + *i * 3; 149 int32_t magic = 0x2342aabb + *i * 3;
118 RawMachineAssemblerTester<int32_t> m; 150 RawMachineAssemblerTester<int32_t> m;
119 int32_t offset = *i; 151 int32_t offset = *i;
120 byte* from = reinterpret_cast<byte*>(&p1) - offset; 152 byte* from = reinterpret_cast<byte*>(&p1) - offset;
121 byte* to = reinterpret_cast<byte*>(&p2) - offset; 153 byte* to = reinterpret_cast<byte*>(&p2) - offset;
122 // generate load [#base + #index] 154 // generate load [#base + #index]
123 Node* load = m.Load(MachineType::Float64(), m.PointerConstant(from), 155 if (t == LoadStoreKind::kLoadStore) {
124 m.IntPtrConstant(offset)); 156 Node* load = m.Load(MachineType::Float64(), m.PointerConstant(from),
125 m.Store(MachineRepresentation::kFloat64, m.PointerConstant(to), 157 m.IntPtrConstant(offset));
126 m.IntPtrConstant(offset), load, kNoWriteBarrier); 158 m.Store(MachineRepresentation::kFloat64, m.PointerConstant(to),
159 m.IntPtrConstant(offset), load, kNoWriteBarrier);
160 } else if (t == LoadStoreKind::kUnalignedLoadStore) {
161 Node* load =
162 m.UnalignedLoad(MachineType::Float64(), m.PointerConstant(from),
163 m.IntPtrConstant(offset));
164 m.UnalignedStore(MachineRepresentation::kFloat64, m.PointerConstant(to),
165 m.IntPtrConstant(offset), load);
166 } else {
167 UNREACHABLE();
168 }
127 m.Return(m.Int32Constant(magic)); 169 m.Return(m.Int32Constant(magic));
128 170
129 FOR_FLOAT64_INPUTS(j) { 171 FOR_FLOAT64_INPUTS(j) {
130 p1 = *j; 172 p1 = *j;
131 p2 = *j - 5; 173 p2 = *j - 5;
132 CHECK_EQ(magic, m.Call()); 174 CHECK_EQ(magic, m.Call());
133 CheckDoubleEq(p1, p2); 175 CheckDoubleEq(p1, p2);
134 } 176 }
135 } 177 }
136 } 178 }
179 } // namespace
180
181 TEST(RunLoadInt32) { RunLoadInt32(LoadStoreKind::kLoadStore); }
182
183 TEST(RunUnalignedLoadInt32) {
184 RunLoadInt32(LoadStoreKind::kUnalignedLoadStore);
185 }
186
187 TEST(RunLoadInt32Offset) { RunLoadInt32Offset(LoadStoreKind::kLoadStore); }
188
189 TEST(RunUnalignedLoadInt32Offset) {
190 RunLoadInt32Offset(LoadStoreKind::kUnalignedLoadStore);
191 }
192
193 TEST(RunLoadStoreFloat32Offset) {
194 RunLoadStoreFloat32Offset(LoadStoreKind::kLoadStore);
195 }
196
197 TEST(RunUnalignedLoadStoreFloat32Offset) {
198 RunLoadStoreFloat32Offset(LoadStoreKind::kUnalignedLoadStore);
199 }
200
201 TEST(RunLoadStoreFloat64Offset) {
202 RunLoadStoreFloat64Offset(LoadStoreKind::kLoadStore);
203 }
204
205 TEST(RunUnalignedLoadStoreFloat64Offset) {
206 RunLoadStoreFloat64Offset(LoadStoreKind::kUnalignedLoadStore);
207 }
137 208
138 namespace { 209 namespace {
139 template <typename Type> 210 template <typename Type>
140 void RunLoadImmIndex(MachineType rep) { 211 void RunLoadImmIndex(MachineType rep, LoadStoreKind t) {
141 const int kNumElems = 3; 212 const int kNumElems = 3;
142 Type buffer[kNumElems]; 213 Type buffer[kNumElems];
143 214
144 // initialize the buffer with some raw data. 215 // initialize the buffer with some raw data.
145 byte* raw = reinterpret_cast<byte*>(buffer); 216 byte* raw = reinterpret_cast<byte*>(buffer);
146 for (size_t i = 0; i < sizeof(buffer); i++) { 217 for (size_t i = 0; i < sizeof(buffer); i++) {
147 raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA); 218 raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
148 } 219 }
149 220
150 // Test with various large and small offsets. 221 // Test with various large and small offsets.
151 for (int offset = -1; offset <= 200000; offset *= -5) { 222 for (int offset = -1; offset <= 200000; offset *= -5) {
152 for (int i = 0; i < kNumElems; i++) { 223 for (int i = 0; i < kNumElems; i++) {
153 BufferedRawMachineAssemblerTester<Type> m; 224 BufferedRawMachineAssemblerTester<Type> m;
154 Node* base = m.PointerConstant(buffer - offset); 225 Node* base = m.PointerConstant(buffer - offset);
155 Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0])); 226 Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
156 m.Return(m.Load(rep, base, index)); 227 if (t == LoadStoreKind::kLoadStore) {
228 m.Return(m.Load(rep, base, index));
229 } else if (t == LoadStoreKind::kUnalignedLoadStore) {
230 m.Return(m.UnalignedLoad(rep, base, index));
231 } else {
232 UNREACHABLE();
233 }
157 234
158 volatile Type expected = buffer[i]; 235 volatile Type expected = buffer[i];
159 volatile Type actual = m.Call(); 236 volatile Type actual = m.Call();
160 CHECK_EQ(expected, actual); 237 CHECK_EQ(expected, actual);
161 } 238 }
162 } 239 }
163 } 240 }
164 241
165 template <typename CType> 242 template <typename CType>
166 void RunLoadStore(MachineType rep) { 243 void RunLoadStore(MachineType rep, LoadStoreKind t) {
167 const int kNumElems = 4; 244 const int kNumElems = 4;
168 CType buffer[kNumElems]; 245 CType buffer[kNumElems];
169 246
170 for (int32_t x = 0; x < kNumElems; x++) { 247 for (int32_t x = 0; x < kNumElems; x++) {
171 int32_t y = kNumElems - x - 1; 248 int32_t y = kNumElems - x - 1;
172 // initialize the buffer with raw data. 249 // initialize the buffer with raw data.
173 byte* raw = reinterpret_cast<byte*>(buffer); 250 byte* raw = reinterpret_cast<byte*>(buffer);
174 for (size_t i = 0; i < sizeof(buffer); i++) { 251 for (size_t i = 0; i < sizeof(buffer); i++) {
175 raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA); 252 raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
176 } 253 }
177 254
178 RawMachineAssemblerTester<int32_t> m; 255 RawMachineAssemblerTester<int32_t> m;
179 int32_t OK = 0x29000 + x; 256 int32_t OK = 0x29000 + x;
180 Node* base = m.PointerConstant(buffer); 257 Node* base = m.PointerConstant(buffer);
181 Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0])); 258 Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0]));
182 Node* load = m.Load(rep, base, index0);
183 Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0])); 259 Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0]));
184 m.Store(rep.representation(), base, index1, load, kNoWriteBarrier); 260 if (t == LoadStoreKind::kLoadStore) {
261 Node* load = m.Load(rep, base, index0);
262 m.Store(rep.representation(), base, index1, load, kNoWriteBarrier);
263 } else if (t == LoadStoreKind::kUnalignedLoadStore) {
264 Node* load = m.UnalignedLoad(rep, base, index0);
265 m.UnalignedStore(rep.representation(), base, index1, load);
266 }
267
185 m.Return(m.Int32Constant(OK)); 268 m.Return(m.Int32Constant(OK));
186 269
187 CHECK(buffer[x] != buffer[y]); 270 CHECK(buffer[x] != buffer[y]);
188 CHECK_EQ(OK, m.Call()); 271 CHECK_EQ(OK, m.Call());
189 CHECK(buffer[x] == buffer[y]); 272 CHECK(buffer[x] == buffer[y]);
190 } 273 }
191 } 274 }
275
276 template <typename CType>
277 void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) {
278 CType in, out;
279 CType in_buffer[2];
280 CType out_buffer[2];
281 byte* raw;
282
283 for (int x = 0; x < sizeof(CType); x++) {
284 int y = sizeof(CType) - x;
285
286 raw = reinterpret_cast<byte*>(&in);
287 for (size_t i = 0; i < sizeof(CType); i++) {
288 raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA);
289 }
290
291 raw = reinterpret_cast<byte*>(in_buffer);
292 MemCopy(raw + x, &in, sizeof(CType));
293
294 RawMachineAssemblerTester<int32_t> m;
295 int32_t OK = 0x29000 + x;
296
297 Node* base0 = m.PointerConstant(in_buffer);
298 Node* base1 = m.PointerConstant(out_buffer);
299 Node* index0 = m.IntPtrConstant(x);
300 Node* index1 = m.IntPtrConstant(y);
301 Node* load = m.UnalignedLoad(rep, base0, index0);
302 m.UnalignedStore(rep.representation(), base1, index1, load);
303
304 m.Return(m.Int32Constant(OK));
305
306 CHECK_EQ(OK, m.Call());
307
308 raw = reinterpret_cast<byte*>(&out_buffer);
309 MemCopy(&out, raw + y, sizeof(CType));
310 CHECK(in == out);
311 }
312 }
192 } // namespace 313 } // namespace
193 314
194 TEST(RunLoadImmIndex) { 315 TEST(RunLoadImmIndex) {
195 RunLoadImmIndex<int8_t>(MachineType::Int8()); 316 RunLoadImmIndex<int8_t>(MachineType::Int8(), LoadStoreKind::kLoadStore);
196 RunLoadImmIndex<uint8_t>(MachineType::Uint8()); 317 RunLoadImmIndex<uint8_t>(MachineType::Uint8(), LoadStoreKind::kLoadStore);
197 RunLoadImmIndex<int16_t>(MachineType::Int16()); 318 RunLoadImmIndex<int16_t>(MachineType::Int16(), LoadStoreKind::kLoadStore);
198 RunLoadImmIndex<uint16_t>(MachineType::Uint16()); 319 RunLoadImmIndex<uint16_t>(MachineType::Uint16(), LoadStoreKind::kLoadStore);
199 RunLoadImmIndex<int32_t>(MachineType::Int32()); 320 RunLoadImmIndex<int32_t>(MachineType::Int32(), LoadStoreKind::kLoadStore);
200 RunLoadImmIndex<uint32_t>(MachineType::Uint32()); 321 RunLoadImmIndex<uint32_t>(MachineType::Uint32(), LoadStoreKind::kLoadStore);
201 RunLoadImmIndex<int32_t*>(MachineType::AnyTagged()); 322 RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(),
202 RunLoadImmIndex<float>(MachineType::Float32()); 323 LoadStoreKind::kLoadStore);
203 RunLoadImmIndex<double>(MachineType::Float64()); 324 RunLoadImmIndex<float>(MachineType::Float32(), LoadStoreKind::kLoadStore);
325 RunLoadImmIndex<double>(MachineType::Float64(), LoadStoreKind::kLoadStore);
204 #if V8_TARGET_ARCH_64_BIT 326 #if V8_TARGET_ARCH_64_BIT
205 RunLoadImmIndex<int64_t>(MachineType::Int64()); 327 RunLoadImmIndex<int64_t>(MachineType::Int64(), LoadStoreKind::kLoadStore);
328 #endif
329 // TODO(titzer): test various indexing modes.
330 }
331
332 TEST(RunUnalignedLoadImmIndex) {
333 RunLoadImmIndex<int16_t>(MachineType::Int16(),
334 LoadStoreKind::kUnalignedLoadStore);
335 RunLoadImmIndex<uint16_t>(MachineType::Uint16(),
336 LoadStoreKind::kUnalignedLoadStore);
337 RunLoadImmIndex<int32_t>(MachineType::Int32(),
338 LoadStoreKind::kUnalignedLoadStore);
339 RunLoadImmIndex<uint32_t>(MachineType::Uint32(),
340 LoadStoreKind::kUnalignedLoadStore);
341 RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(),
342 LoadStoreKind::kUnalignedLoadStore);
343 RunLoadImmIndex<float>(MachineType::Float32(),
344 LoadStoreKind::kUnalignedLoadStore);
345 RunLoadImmIndex<double>(MachineType::Float64(),
346 LoadStoreKind::kUnalignedLoadStore);
347 #if V8_TARGET_ARCH_64_BIT
348 RunLoadImmIndex<int64_t>(MachineType::Int64(),
349 LoadStoreKind::kUnalignedLoadStore);
206 #endif 350 #endif
207 // TODO(titzer): test various indexing modes. 351 // TODO(titzer): test various indexing modes.
208 } 352 }
209 353
210 TEST(RunLoadStore) { 354 TEST(RunLoadStore) {
211 RunLoadStore<int8_t>(MachineType::Int8()); 355 RunLoadStore<int8_t>(MachineType::Int8(), LoadStoreKind::kLoadStore);
212 RunLoadStore<uint8_t>(MachineType::Uint8()); 356 RunLoadStore<uint8_t>(MachineType::Uint8(), LoadStoreKind::kLoadStore);
213 RunLoadStore<int16_t>(MachineType::Int16()); 357 RunLoadStore<int16_t>(MachineType::Int16(), LoadStoreKind::kLoadStore);
214 RunLoadStore<uint16_t>(MachineType::Uint16()); 358 RunLoadStore<uint16_t>(MachineType::Uint16(), LoadStoreKind::kLoadStore);
215 RunLoadStore<int32_t>(MachineType::Int32()); 359 RunLoadStore<int32_t>(MachineType::Int32(), LoadStoreKind::kLoadStore);
216 RunLoadStore<uint32_t>(MachineType::Uint32()); 360 RunLoadStore<uint32_t>(MachineType::Uint32(), LoadStoreKind::kLoadStore);
217 RunLoadStore<void*>(MachineType::AnyTagged()); 361 RunLoadStore<void*>(MachineType::AnyTagged(), LoadStoreKind::kLoadStore);
218 RunLoadStore<float>(MachineType::Float32()); 362 RunLoadStore<float>(MachineType::Float32(), LoadStoreKind::kLoadStore);
219 RunLoadStore<double>(MachineType::Float64()); 363 RunLoadStore<double>(MachineType::Float64(), LoadStoreKind::kLoadStore);
220 #if V8_TARGET_ARCH_64_BIT 364 #if V8_TARGET_ARCH_64_BIT
221 RunLoadStore<int64_t>(MachineType::Int64()); 365 RunLoadStore<int64_t>(MachineType::Int64(), LoadStoreKind::kLoadStore);
222 #endif 366 #endif
223 } 367 }
224 368
369 TEST(RunUnalignedLoadStore) {
370 RunLoadStore<int16_t>(MachineType::Int16(),
371 LoadStoreKind::kUnalignedLoadStore);
372 RunLoadStore<uint16_t>(MachineType::Uint16(),
373 LoadStoreKind::kUnalignedLoadStore);
374 RunLoadStore<int32_t>(MachineType::Int32(),
375 LoadStoreKind::kUnalignedLoadStore);
376 RunLoadStore<uint32_t>(MachineType::Uint32(),
377 LoadStoreKind::kUnalignedLoadStore);
378 RunLoadStore<void*>(MachineType::AnyTagged(),
379 LoadStoreKind::kUnalignedLoadStore);
380 RunLoadStore<float>(MachineType::Float32(),
381 LoadStoreKind::kUnalignedLoadStore);
382 RunLoadStore<double>(MachineType::Float64(),
383 LoadStoreKind::kUnalignedLoadStore);
384 #if V8_TARGET_ARCH_64_BIT
385 RunLoadStore<int64_t>(MachineType::Int64(),
386 LoadStoreKind::kUnalignedLoadStore);
387 #endif
388 }
389
390 TEST(RunUnalignedLoadStoreUnalignedAccess) {
391 RunUnalignedLoadStoreUnalignedAccess<int16_t>(MachineType::Int16());
392 RunUnalignedLoadStoreUnalignedAccess<uint16_t>(MachineType::Uint16());
393 RunUnalignedLoadStoreUnalignedAccess<int32_t>(MachineType::Int32());
394 RunUnalignedLoadStoreUnalignedAccess<uint32_t>(MachineType::Uint32());
395 RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::AnyTagged());
396 RunUnalignedLoadStoreUnalignedAccess<float>(MachineType::Float32());
397 RunUnalignedLoadStoreUnalignedAccess<double>(MachineType::Float64());
398 #if V8_TARGET_ARCH_64_BIT
399 RunUnalignedLoadStoreUnalignedAccess<int64_t>(MachineType::Int64());
400 #endif
401 }
402
225 #if V8_TARGET_LITTLE_ENDIAN 403 #if V8_TARGET_LITTLE_ENDIAN
226 #define LSB(addr, bytes) addr 404 #define LSB(addr, bytes) addr
227 #elif V8_TARGET_BIG_ENDIAN 405 #elif V8_TARGET_BIG_ENDIAN
228 #define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - bytes 406 #define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - bytes
229 #else 407 #else
230 #error "Unknown Architecture" 408 #error "Unknown Architecture"
231 #endif 409 #endif
232 410
233 TEST(RunLoadStoreSignExtend32) { 411 namespace {
412 void RunLoadStoreSignExtend32(LoadStoreKind t) {
234 int32_t buffer[4]; 413 int32_t buffer[4];
235 RawMachineAssemblerTester<int32_t> m; 414 RawMachineAssemblerTester<int32_t> m;
236 Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8()); 415 Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8());
237 Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16()); 416 if (t == LoadStoreKind::kLoadStore) {
238 Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Int32()); 417 Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
239 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8); 418 Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Int32());
240 m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16); 419 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
241 m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32); 420 m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16);
421 m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32);
422 } else if (t == LoadStoreKind::kUnalignedLoadStore) {
423 Node* load16 =
424 m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
425 Node* load32 = m.UnalignedLoadFromPointer(&buffer[0], MachineType::Int32());
426 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
427 m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord32,
428 load16);
429 m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord32,
430 load32);
431 } else {
432 UNREACHABLE();
433 }
242 m.Return(load8); 434 m.Return(load8);
243 435
244 FOR_INT32_INPUTS(i) { 436 FOR_INT32_INPUTS(i) {
245 buffer[0] = *i; 437 buffer[0] = *i;
246 438
247 CHECK_EQ(static_cast<int8_t>(*i & 0xff), m.Call()); 439 CHECK_EQ(static_cast<int8_t>(*i & 0xff), m.Call());
248 CHECK_EQ(static_cast<int8_t>(*i & 0xff), buffer[1]); 440 CHECK_EQ(static_cast<int8_t>(*i & 0xff), buffer[1]);
249 CHECK_EQ(static_cast<int16_t>(*i & 0xffff), buffer[2]); 441 CHECK_EQ(static_cast<int16_t>(*i & 0xffff), buffer[2]);
250 CHECK_EQ(*i, buffer[3]); 442 CHECK_EQ(*i, buffer[3]);
251 } 443 }
252 } 444 }
253 445
254 TEST(RunLoadStoreZeroExtend32) { 446 void RunLoadStoreZeroExtend32(LoadStoreKind t) {
255 uint32_t buffer[4]; 447 uint32_t buffer[4];
256 RawMachineAssemblerTester<uint32_t> m; 448 RawMachineAssemblerTester<uint32_t> m;
257 Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8()); 449 Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
258 Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16()); 450 if (t == LoadStoreKind::kLoadStore) {
259 Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Uint32()); 451 Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
260 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8); 452 Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Uint32());
261 m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16); 453 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
262 m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32); 454 m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16);
455 m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32);
456 } else if (t == LoadStoreKind::kUnalignedLoadStore) {
457 Node* load16 =
458 m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
459 Node* load32 =
460 m.UnalignedLoadFromPointer(&buffer[0], MachineType::Uint32());
461 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
462 m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord32,
463 load16);
464 m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord32,
465 load32);
466 }
263 m.Return(load8); 467 m.Return(load8);
264 468
265 FOR_UINT32_INPUTS(i) { 469 FOR_UINT32_INPUTS(i) {
266 buffer[0] = *i; 470 buffer[0] = *i;
267 471
268 CHECK_EQ((*i & 0xff), m.Call()); 472 CHECK_EQ((*i & 0xff), m.Call());
269 CHECK_EQ((*i & 0xff), buffer[1]); 473 CHECK_EQ((*i & 0xff), buffer[1]);
270 CHECK_EQ((*i & 0xffff), buffer[2]); 474 CHECK_EQ((*i & 0xffff), buffer[2]);
271 CHECK_EQ(*i, buffer[3]); 475 CHECK_EQ(*i, buffer[3]);
272 } 476 }
273 } 477 }
478 } // namespace
479
480 TEST(RunLoadStoreSignExtend32) {
481 RunLoadStoreSignExtend32(LoadStoreKind::kLoadStore);
482 }
483
484 TEST(RunUnalignedLoadStoreSignExtend32) {
485 RunLoadStoreSignExtend32(LoadStoreKind::kUnalignedLoadStore);
486 }
487
488 TEST(RunLoadStoreZeroExtend32) {
489 RunLoadStoreZeroExtend32(LoadStoreKind::kLoadStore);
490 }
491
492 TEST(RunUnalignedLoadStoreZeroExtend32) {
493 RunLoadStoreZeroExtend32(LoadStoreKind::kUnalignedLoadStore);
494 }
274 495
275 #if V8_TARGET_ARCH_64_BIT 496 #if V8_TARGET_ARCH_64_BIT
276 TEST(RunCheckedLoadInt64) {
277 int64_t buffer[] = {0x66bbccddeeff0011LL, 0x1122334455667788LL};
278 RawMachineAssemblerTester<int64_t> m(MachineType::Int32());
279 Node* base = m.PointerConstant(buffer);
280 Node* index = m.Parameter(0);
281 Node* length = m.Int32Constant(16);
282 Node* load = m.AddNode(m.machine()->CheckedLoad(MachineType::Int64()), base,
283 index, length);
284 m.Return(load);
285 497
286 CHECK_EQ(buffer[0], m.Call(0)); 498 namespace {
287 CHECK_EQ(buffer[1], m.Call(8)); 499 void RunLoadStoreSignExtend64(LoadStoreKind t) {
288 CheckOobValue(m.Call(16));
289 }
290
291 TEST(RunLoadStoreSignExtend64) {
292 if (true) return; // TODO(titzer): sign extension of loads to 64-bit. 500 if (true) return; // TODO(titzer): sign extension of loads to 64-bit.
293 int64_t buffer[5]; 501 int64_t buffer[5];
294 RawMachineAssemblerTester<int64_t> m; 502 RawMachineAssemblerTester<int64_t> m;
295 Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8()); 503 Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8());
296 Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16()); 504 if (t == LoadStoreKind::kLoadStore) {
297 Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32()); 505 Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
298 Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Int64()); 506 Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32());
299 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8); 507 Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Int64());
300 m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16); 508 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
301 m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32); 509 m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16);
302 m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64); 510 m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32);
511 m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64);
512 } else if (t == LoadStoreKind::kUnalignedLoadStore) {
513 Node* load16 =
514 m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
515 Node* load32 =
516 m.UnalignedLoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32());
517 Node* load64 = m.UnalignedLoadFromPointer(&buffer[0], MachineType::Int64());
518 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
519 m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord64,
520 load16);
521 m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord64,
522 load32);
523 m.UnalignedStoreToPointer(&buffer[4], MachineRepresentation::kWord64,
524 load64);
525 } else {
526 UNREACHABLE();
527 }
303 m.Return(load8); 528 m.Return(load8);
304 529
305 FOR_INT64_INPUTS(i) { 530 FOR_INT64_INPUTS(i) {
306 buffer[0] = *i; 531 buffer[0] = *i;
307 532
308 CHECK_EQ(static_cast<int8_t>(*i & 0xff), m.Call()); 533 CHECK_EQ(static_cast<int8_t>(*i & 0xff), m.Call());
309 CHECK_EQ(static_cast<int8_t>(*i & 0xff), buffer[1]); 534 CHECK_EQ(static_cast<int8_t>(*i & 0xff), buffer[1]);
310 CHECK_EQ(static_cast<int16_t>(*i & 0xffff), buffer[2]); 535 CHECK_EQ(static_cast<int16_t>(*i & 0xffff), buffer[2]);
311 CHECK_EQ(static_cast<int32_t>(*i & 0xffffffff), buffer[3]); 536 CHECK_EQ(static_cast<int32_t>(*i & 0xffffffff), buffer[3]);
312 CHECK_EQ(*i, buffer[4]); 537 CHECK_EQ(*i, buffer[4]);
313 } 538 }
314 } 539 }
315 540
316 TEST(RunLoadStoreZeroExtend64) { 541 void RunLoadStoreZeroExtend64(LoadStoreKind t) {
317 if (kPointerSize < 8) return; 542 if (kPointerSize < 8) return;
318 uint64_t buffer[5]; 543 uint64_t buffer[5];
319 RawMachineAssemblerTester<int64_t> m; 544 RawMachineAssemblerTester<int64_t> m;
320 Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8()); 545 Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
321 Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16()); 546 if (t == LoadStoreKind::kLoadStore) {
322 Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32()); 547 Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
323 Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Uint64()); 548 Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32());
324 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8); 549 Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Uint64());
325 m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16); 550 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
326 m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32); 551 m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16);
327 m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64); 552 m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32);
553 m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64);
554 } else if (t == LoadStoreKind::kUnalignedLoadStore) {
555 Node* load16 =
556 m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
557 Node* load32 =
558 m.UnalignedLoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32());
559 Node* load64 =
560 m.UnalignedLoadFromPointer(&buffer[0], MachineType::Uint64());
561 m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
562 m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord64,
563 load16);
564 m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord64,
565 load32);
566 m.UnalignedStoreToPointer(&buffer[4], MachineRepresentation::kWord64,
567 load64);
568 } else {
569 UNREACHABLE();
570 }
328 m.Return(load8); 571 m.Return(load8);
329 572
330 FOR_UINT64_INPUTS(i) { 573 FOR_UINT64_INPUTS(i) {
331 buffer[0] = *i; 574 buffer[0] = *i;
332 575
333 CHECK_EQ((*i & 0xff), m.Call()); 576 CHECK_EQ((*i & 0xff), m.Call());
334 CHECK_EQ((*i & 0xff), buffer[1]); 577 CHECK_EQ((*i & 0xff), buffer[1]);
335 CHECK_EQ((*i & 0xffff), buffer[2]); 578 CHECK_EQ((*i & 0xffff), buffer[2]);
336 CHECK_EQ((*i & 0xffffffff), buffer[3]); 579 CHECK_EQ((*i & 0xffffffff), buffer[3]);
337 CHECK_EQ(*i, buffer[4]); 580 CHECK_EQ(*i, buffer[4]);
338 } 581 }
339 } 582 }
340 583
584 } // namespace
585
// Checks CheckedLoad of 64-bit values: in-bounds byte offsets return the
// corresponding buffer element; an offset equal to the length is out of
// bounds and must yield the OOB value (validated by CheckOobValue).
TEST(RunCheckedLoadInt64) {
  int64_t buffer[] = {0x66bbccddeeff0011LL, 0x1122334455667788LL};
  RawMachineAssemblerTester<int64_t> m(MachineType::Int32());
  Node* base = m.PointerConstant(buffer);
  Node* index = m.Parameter(0);        // byte offset into |buffer|
  Node* length = m.Int32Constant(16);  // buffer size in bytes (2 * 8)
  Node* load = m.AddNode(m.machine()->CheckedLoad(MachineType::Int64()), base,
                         index, length);
  m.Return(load);

  CHECK_EQ(buffer[0], m.Call(0));  // first element, in bounds
  CHECK_EQ(buffer[1], m.Call(8));  // second element, in bounds
  CheckOobValue(m.Call(16));       // index == length: out of bounds
}
600
// Sign-extending narrow loads into 64-bit slots, aligned variant.
TEST(RunLoadStoreSignExtend64) {
  RunLoadStoreSignExtend64(LoadStoreKind::kLoadStore);
}
604
// Sign-extending narrow loads into 64-bit slots, unaligned variant.
TEST(RunUnalignedLoadStoreSignExtend64) {
  RunLoadStoreSignExtend64(LoadStoreKind::kUnalignedLoadStore);
}
608
// Zero-extending narrow loads into 64-bit slots, aligned variant.
TEST(RunLoadStoreZeroExtend64) {
  RunLoadStoreZeroExtend64(LoadStoreKind::kLoadStore);
}
612
// Zero-extending narrow loads into 64-bit slots, unaligned variant.
TEST(RunUnalignedLoadStoreZeroExtend64) {
  RunLoadStoreZeroExtend64(LoadStoreKind::kUnalignedLoadStore);
}
616
341 TEST(RunCheckedStoreInt64) { 617 TEST(RunCheckedStoreInt64) {
342 const int64_t write = 0x5566778899aabbLL; 618 const int64_t write = 0x5566778899aabbLL;
343 const int64_t before = 0x33bbccddeeff0011LL; 619 const int64_t before = 0x33bbccddeeff0011LL;
344 int64_t buffer[] = {before, before}; 620 int64_t buffer[] = {before, before};
345 RawMachineAssemblerTester<int32_t> m(MachineType::Int32()); 621 RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
346 Node* base = m.PointerConstant(buffer); 622 Node* base = m.PointerConstant(buffer);
347 Node* index = m.Parameter(0); 623 Node* index = m.Parameter(0);
348 Node* length = m.Int32Constant(16); 624 Node* length = m.Int32Constant(16);
349 Node* value = m.Int64Constant(write); 625 Node* value = m.Int64Constant(write);
350 Node* store = 626 Node* store =
(...skipping 11 matching lines...) Expand all
362 CHECK_EQ(before, buffer[1]); 638 CHECK_EQ(before, buffer[1]);
363 639
364 CHECK_EQ(11, m.Call(8)); 640 CHECK_EQ(11, m.Call(8));
365 CHECK_EQ(write, buffer[0]); 641 CHECK_EQ(write, buffer[0]);
366 CHECK_EQ(write, buffer[1]); 642 CHECK_EQ(write, buffer[1]);
367 } 643 }
368 #endif 644 #endif
369 645
370 namespace { 646 namespace {
371 template <typename IntType> 647 template <typename IntType>
372 void LoadStoreTruncation(MachineType kRepresentation) { 648 void LoadStoreTruncation(MachineType kRepresentation, LoadStoreKind t) {
373 IntType input; 649 IntType input;
374 650
375 RawMachineAssemblerTester<int32_t> m; 651 RawMachineAssemblerTester<int32_t> m;
376 Node* a = m.LoadFromPointer(&input, kRepresentation); 652 Node* ap1;
377 Node* ap1 = m.Int32Add(a, m.Int32Constant(1)); 653 if (t == LoadStoreKind::kLoadStore) {
378 m.StoreToPointer(&input, kRepresentation.representation(), ap1); 654 Node* a = m.LoadFromPointer(&input, kRepresentation);
655 ap1 = m.Int32Add(a, m.Int32Constant(1));
656 m.StoreToPointer(&input, kRepresentation.representation(), ap1);
657 } else if (t == LoadStoreKind::kUnalignedLoadStore) {
658 Node* a = m.UnalignedLoadFromPointer(&input, kRepresentation);
659 ap1 = m.Int32Add(a, m.Int32Constant(1));
660 m.UnalignedStoreToPointer(&input, kRepresentation.representation(), ap1);
661 } else {
662 UNREACHABLE();
663 }
379 m.Return(ap1); 664 m.Return(ap1);
380 665
381 const IntType max = std::numeric_limits<IntType>::max(); 666 const IntType max = std::numeric_limits<IntType>::max();
382 const IntType min = std::numeric_limits<IntType>::min(); 667 const IntType min = std::numeric_limits<IntType>::min();
383 668
384 // Test upper bound. 669 // Test upper bound.
385 input = max; 670 input = max;
386 CHECK_EQ(max + 1, m.Call()); 671 CHECK_EQ(max + 1, m.Call());
387 CHECK_EQ(min, input); 672 CHECK_EQ(min, input);
388 673
389 // Test lower bound. 674 // Test lower bound.
390 input = min; 675 input = min;
391 CHECK_EQ(static_cast<IntType>(max + 2), m.Call()); 676 CHECK_EQ(static_cast<IntType>(max + 2), m.Call());
392 CHECK_EQ(min + 1, input); 677 CHECK_EQ(min + 1, input);
393 678
394 // Test all one byte values that are not one byte bounds. 679 // Test all one byte values that are not one byte bounds.
395 for (int i = -127; i < 127; i++) { 680 for (int i = -127; i < 127; i++) {
396 input = i; 681 input = i;
397 int expected = i >= 0 ? i + 1 : max + (i - min) + 2; 682 int expected = i >= 0 ? i + 1 : max + (i - min) + 2;
398 CHECK_EQ(static_cast<IntType>(expected), m.Call()); 683 CHECK_EQ(static_cast<IntType>(expected), m.Call());
399 CHECK_EQ(static_cast<IntType>(i + 1), input); 684 CHECK_EQ(static_cast<IntType>(i + 1), input);
400 } 685 }
401 } 686 }
402 } // namespace 687 } // namespace
403 688
// Truncating stores for 8- and 16-bit representations, aligned variant.
TEST(RunLoadStoreTruncation) {
  LoadStoreTruncation<int8_t>(MachineType::Int8(), LoadStoreKind::kLoadStore);
  LoadStoreTruncation<int16_t>(MachineType::Int16(), LoadStoreKind::kLoadStore);
}
693
// Truncating stores, unaligned variant. Only the 16-bit representation is
// exercised (single-byte accesses are inherently aligned).
TEST(RunUnalignedLoadStoreTruncation) {
  LoadStoreTruncation<int16_t>(MachineType::Int16(),
                               LoadStoreKind::kUnalignedLoadStore);
}
408 698
409 void TestRunOobCheckedLoad(bool length_is_immediate) { 699 void TestRunOobCheckedLoad(bool length_is_immediate) {
410 USE(CheckOobValue<int32_t>); 700 USE(CheckOobValue<int32_t>);
411 USE(CheckOobValue<int64_t>); 701 USE(CheckOobValue<int64_t>);
412 USE(CheckOobValue<float>); 702 USE(CheckOobValue<float>);
413 USE(CheckOobValue<double>); 703 USE(CheckOobValue<double>);
414 704
415 RawMachineAssemblerTester<int32_t> m(MachineType::Int32(), 705 RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
416 MachineType::Int32()); 706 MachineType::Int32());
(...skipping 493 matching lines...) Expand 10 before | Expand all | Expand 10 after
910 TestRunOobCheckedLoadT_pseudo<int32_t>(4 * A_BILLION, true); 1200 TestRunOobCheckedLoadT_pseudo<int32_t>(4 * A_BILLION, true);
911 TestRunOobCheckedLoadT_pseudo<float>(4 * A_BILLION, false); 1201 TestRunOobCheckedLoadT_pseudo<float>(4 * A_BILLION, false);
912 TestRunOobCheckedLoadT_pseudo<float>(4 * A_BILLION, true); 1202 TestRunOobCheckedLoadT_pseudo<float>(4 * A_BILLION, true);
913 TestRunOobCheckedLoadT_pseudo<double>(4 * A_BILLION, false); 1203 TestRunOobCheckedLoadT_pseudo<double>(4 * A_BILLION, false);
914 TestRunOobCheckedLoadT_pseudo<double>(4 * A_BILLION, true); 1204 TestRunOobCheckedLoadT_pseudo<double>(4 * A_BILLION, true);
915 } 1205 }
916 1206
917 } // namespace compiler 1207 } // namespace compiler
918 } // namespace internal 1208 } // namespace internal
919 } // namespace v8 1209 } // namespace v8
OLDNEW
« no previous file with comments | « src/machine-type.h ('k') | test/unittests/compiler/int64-lowering-unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698