Chromium Code Reviews

Side by Side Diff: test/cctest/compiler/test-simplified-lowering.cc

Issue 2292463002: [turbofan] Remove invalid typing rules. (Closed)
Patch Set: Created 4 years, 3 months ago
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <limits>
6
7 #include "src/ast/scopes.h"
8 #include "src/compiler/access-builder.h"
9 #include "src/compiler/control-builders.h"
10 #include "src/compiler/effect-control-linearizer.h"
11 #include "src/compiler/graph-visualizer.h"
12 #include "src/compiler/memory-optimizer.h"
13 #include "src/compiler/node-properties.h"
14 #include "src/compiler/pipeline.h"
15 #include "src/compiler/representation-change.h"
16 #include "src/compiler/scheduler.h"
17 #include "src/compiler/simplified-lowering.h"
18 #include "src/compiler/source-position.h"
19 #include "src/compiler/typer.h"
20 #include "src/compiler/verifier.h"
21 #include "src/execution.h"
22 #include "src/parsing/parser.h"
23 #include "src/parsing/rewriter.h"
24 #include "test/cctest/cctest.h"
25 #include "test/cctest/compiler/codegen-tester.h"
26 #include "test/cctest/compiler/function-tester.h"
27 #include "test/cctest/compiler/graph-builder-tester.h"
28 #include "test/cctest/compiler/value-helper.h"
29
30 namespace v8 {
31 namespace internal {
32 namespace compiler {
33
34 template <typename ReturnType>
35 class SimplifiedLoweringTester : public GraphBuilderTester<ReturnType> {
36 public:
37 SimplifiedLoweringTester(MachineType p0 = MachineType::None(),
38 MachineType p1 = MachineType::None())
39 : GraphBuilderTester<ReturnType>(p0, p1),
40 typer(new Typer(this->isolate(), this->graph())),
41 javascript(this->zone()),
42 jsgraph(this->isolate(), this->graph(), this->common(), &javascript,
43 this->simplified(), this->machine()),
44 source_positions(jsgraph.graph()),
45 lowering(&jsgraph, this->zone(), &source_positions) {}
46 ~SimplifiedLoweringTester() final { delete typer; }
47
48 Typer* typer = nullptr;
49 JSOperatorBuilder javascript;
50 JSGraph jsgraph;
51 SourcePositionTable source_positions;
52 SimplifiedLowering lowering;
53
54 void LowerAllNodes() {
55 this->End();
56 typer->Run();
57 delete typer, typer = nullptr;
58 lowering.LowerAllNodes();
59 }
60
61 void LowerAllNodesAndLowerChanges() {
62 this->End();
63 typer->Run();
64 delete typer, typer = nullptr;
65 lowering.LowerAllNodes();
66
67 Schedule* schedule = Scheduler::ComputeSchedule(this->zone(), this->graph(),
68 Scheduler::kNoFlags);
69 EffectControlLinearizer linearizer(&jsgraph, schedule, this->zone());
70 linearizer.Run();
71
72 MemoryOptimizer memory_optimizer(&jsgraph, this->zone());
73 memory_optimizer.Optimize();
74 }
75
76 void CheckNumberCall(double expected, double input) {
77 // TODO(titzer): make calls to NewNumber work in cctests.
78 if (expected <= Smi::kMinValue) return;
79 if (expected >= Smi::kMaxValue) return;
80 Handle<Object> num = factory()->NewNumber(input);
81 Object* result = this->Call(*num);
82 CHECK(factory()->NewNumber(expected)->SameValue(result));
83 }
84
85 template <typename T>
86 T* CallWithPotentialGC() {
87 // TODO(titzer): we wrap the code in a JSFunction here to reuse the
88 // JSEntryStub; that could be done with a special prologue or other stub.
89 Handle<JSFunction> fun = FunctionTester::ForMachineGraph(this->graph(), 0);
90 Handle<Object>* args = NULL;
91 MaybeHandle<Object> result = Execution::Call(
92 this->isolate(), fun, factory()->undefined_value(), 0, args);
93 return T::cast(*result.ToHandleChecked());
94 }
95
96 Factory* factory() { return this->isolate()->factory(); }
97 Heap* heap() { return this->isolate()->heap(); }
98 };
99
100
101 // TODO(titzer): factor these tests out to test-run-simplifiedops.cc.
102 // TODO(titzer): test tagged representation for input to NumberToInt32.
103 TEST(RunNumberToInt32_float64) {
104 // TODO(titzer): explicit load/stores here are only because of representations
105 double input;
106 int32_t result;
107 SimplifiedLoweringTester<Object*> t;
108 FieldAccess load = {kUntaggedBase, 0,
109 Handle<Name>(), Type::Number(),
110 MachineType::Float64(), kNoWriteBarrier};
111 Node* loaded = t.LoadField(load, t.PointerConstant(&input));
112 NodeProperties::SetType(loaded, Type::Number());
113 Node* convert = t.NumberToInt32(loaded);
114 FieldAccess store = {kUntaggedBase, 0,
115 Handle<Name>(), Type::Signed32(),
116 MachineType::Int32(), kNoWriteBarrier};
117 t.StoreField(store, t.PointerConstant(&result), convert);
118 t.Return(t.jsgraph.TrueConstant());
119 t.LowerAllNodesAndLowerChanges();
120 t.GenerateCode();
121
122 FOR_FLOAT64_INPUTS(i) {
123 input = *i;
124 int32_t expected = DoubleToInt32(*i);
125 t.Call();
126 CHECK_EQ(expected, result);
127 }
128 }
129
130
131 // TODO(titzer): test tagged representation for input to NumberToUint32.
132 TEST(RunNumberToUint32_float64) {
133 // TODO(titzer): explicit load/stores here are only because of representations
134 double input;
135 uint32_t result;
136 SimplifiedLoweringTester<Object*> t;
137 FieldAccess load = {kUntaggedBase, 0,
138 Handle<Name>(), Type::Number(),
139 MachineType::Float64(), kNoWriteBarrier};
140 Node* loaded = t.LoadField(load, t.PointerConstant(&input));
141 NodeProperties::SetType(loaded, Type::Number());
142 Node* convert = t.NumberToUint32(loaded);
143 FieldAccess store = {kUntaggedBase, 0,
144 Handle<Name>(), Type::Unsigned32(),
145 MachineType::Uint32(), kNoWriteBarrier};
146 t.StoreField(store, t.PointerConstant(&result), convert);
147 t.Return(t.jsgraph.TrueConstant());
148 t.LowerAllNodesAndLowerChanges();
149 t.GenerateCode();
150
151 FOR_FLOAT64_INPUTS(i) {
152 input = *i;
153 uint32_t expected = DoubleToUint32(*i);
154 t.Call();
155 CHECK_EQ(static_cast<int32_t>(expected), static_cast<int32_t>(result));
156 }
157 }
158
159
160 // Create a simple JSObject with a unique map.
161 static Handle<JSObject> TestObject() {
162 static int index = 0;
163 char buffer[50];
164 v8::base::OS::SNPrintF(buffer, 50, "({'a_%d':1})", index++);
165 return Handle<JSObject>::cast(v8::Utils::OpenHandle(*CompileRun(buffer)));
166 }
167
168
169 TEST(RunLoadMap) {
170 SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
171 FieldAccess access = AccessBuilder::ForMap();
172 Node* load = t.LoadField(access, t.Parameter(0));
173 t.Return(load);
174
175 t.LowerAllNodesAndLowerChanges();
176 t.GenerateCode();
177
178 Handle<JSObject> src = TestObject();
179 Handle<Map> src_map(src->map());
180 Object* result = t.Call(*src); // TODO(titzer): raw pointers in call
181 CHECK_EQ(*src_map, result);
182 }
183
184
185 TEST(RunStoreMap) {
186 SimplifiedLoweringTester<int32_t> t(MachineType::AnyTagged(),
187 MachineType::AnyTagged());
188 FieldAccess access = AccessBuilder::ForMap();
189 t.StoreField(access, t.Parameter(1), t.Parameter(0));
190 t.Return(t.jsgraph.TrueConstant());
191
192 t.LowerAllNodesAndLowerChanges();
193 t.GenerateCode();
194
195 Handle<JSObject> src = TestObject();
196 Handle<Map> src_map(src->map());
197 Handle<JSObject> dst = TestObject();
198 CHECK(src->map() != dst->map());
199 t.Call(*src_map, *dst); // TODO(titzer): raw pointers in call
200 CHECK(*src_map == dst->map());
201 }
202
203
204 TEST(RunLoadProperties) {
205 SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
206 FieldAccess access = AccessBuilder::ForJSObjectProperties();
207 Node* load = t.LoadField(access, t.Parameter(0));
208 t.Return(load);
209
210 t.LowerAllNodesAndLowerChanges();
211 t.GenerateCode();
212
213 Handle<JSObject> src = TestObject();
214 Handle<FixedArray> src_props(src->properties());
215 Object* result = t.Call(*src); // TODO(titzer): raw pointers in call
216 CHECK_EQ(*src_props, result);
217 }
218
219
220 TEST(RunLoadStoreMap) {
221 SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged(),
222 MachineType::AnyTagged());
223 FieldAccess access = AccessBuilder::ForMap();
224 Node* load = t.LoadField(access, t.Parameter(0));
225 t.StoreField(access, t.Parameter(1), load);
226 t.Return(load);
227
228 t.LowerAllNodesAndLowerChanges();
229 t.GenerateCode();
230
231 Handle<JSObject> src = TestObject();
232 Handle<Map> src_map(src->map());
233 Handle<JSObject> dst = TestObject();
234 CHECK(src->map() != dst->map());
235 Object* result = t.Call(*src, *dst); // TODO(titzer): raw pointers in call
236 CHECK(result->IsMap());
237 CHECK_EQ(*src_map, result);
238 CHECK(*src_map == dst->map());
239 }
240
241
242 TEST(RunLoadStoreFixedArrayIndex) {
243 SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
244 ElementAccess access = AccessBuilder::ForFixedArrayElement();
245 Node* load = t.LoadElement(access, t.Parameter(0), t.Int32Constant(0));
246 t.StoreElement(access, t.Parameter(0), t.Int32Constant(1), load);
247 t.Return(load);
248
249 t.LowerAllNodesAndLowerChanges();
250 t.GenerateCode();
251
252 Handle<FixedArray> array = t.factory()->NewFixedArray(2);
253 Handle<JSObject> src = TestObject();
254 Handle<JSObject> dst = TestObject();
255 array->set(0, *src);
256 array->set(1, *dst);
257 Object* result = t.Call(*array);
258 CHECK_EQ(*src, result);
259 CHECK_EQ(*src, array->get(0));
260 CHECK_EQ(*src, array->get(1));
261 }
262
263
264 TEST(RunLoadStoreArrayBuffer) {
265 SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
266 const int index = 12;
267 const int array_length = 2 * index;
268 ElementAccess buffer_access =
269 AccessBuilder::ForTypedArrayElement(kExternalInt8Array, true);
270 Node* backing_store = t.LoadField(
271 AccessBuilder::ForJSArrayBufferBackingStore(), t.Parameter(0));
272 Node* load =
273 t.LoadElement(buffer_access, backing_store, t.Int32Constant(index));
274 t.StoreElement(buffer_access, backing_store, t.Int32Constant(index + 1),
275 load);
276 t.Return(t.jsgraph.TrueConstant());
277
278 t.LowerAllNodesAndLowerChanges();
279 t.GenerateCode();
280
281 Handle<JSArrayBuffer> array = t.factory()->NewJSArrayBuffer();
282 JSArrayBuffer::SetupAllocatingData(array, t.isolate(), array_length);
283 uint8_t* data = reinterpret_cast<uint8_t*>(array->backing_store());
284 for (int i = 0; i < array_length; i++) {
285 data[i] = i;
286 }
287
288 // TODO(titzer): raw pointers in call
289 Object* result = t.Call(*array);
290 CHECK_EQ(t.isolate()->heap()->true_value(), result);
291 for (int i = 0; i < array_length; i++) {
292 uint8_t expected = i;
293 if (i == (index + 1)) expected = index;
294 CHECK_EQ(data[i], expected);
295 }
296 }
297
298
299 TEST(RunLoadFieldFromUntaggedBase) {
300 Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3)};
301
302 for (size_t i = 0; i < arraysize(smis); i++) {
303 int offset = static_cast<int>(i * sizeof(Smi*));
304 FieldAccess access = {kUntaggedBase,
305 offset,
306 Handle<Name>(),
307 Type::Integral32(),
308 MachineType::AnyTagged(),
309 kNoWriteBarrier};
310
311 SimplifiedLoweringTester<Object*> t;
312 Node* load = t.LoadField(access, t.PointerConstant(smis));
313 t.Return(load);
314 t.LowerAllNodesAndLowerChanges();
315
316 for (int j = -5; j <= 5; j++) {
317 Smi* expected = Smi::FromInt(j);
318 smis[i] = expected;
319 CHECK_EQ(expected, t.Call());
320 }
321 }
322 }
323
324
325 TEST(RunStoreFieldToUntaggedBase) {
326 Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3)};
327
328 for (size_t i = 0; i < arraysize(smis); i++) {
329 int offset = static_cast<int>(i * sizeof(Smi*));
330 FieldAccess access = {kUntaggedBase,
331 offset,
332 Handle<Name>(),
333 Type::Integral32(),
334 MachineType::AnyTagged(),
335 kNoWriteBarrier};
336
337 SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
338 Node* p0 = t.Parameter(0);
339 t.StoreField(access, t.PointerConstant(smis), p0);
340 t.Return(p0);
341 t.LowerAllNodesAndLowerChanges();
342
343 for (int j = -5; j <= 5; j++) {
344 Smi* expected = Smi::FromInt(j);
345 smis[i] = Smi::FromInt(-100);
346 CHECK_EQ(expected, t.Call(expected));
347 CHECK_EQ(expected, smis[i]);
348 }
349 }
350 }
351
352
353 TEST(RunLoadElementFromUntaggedBase) {
354 Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3),
355 Smi::FromInt(4), Smi::FromInt(5)};
356
357 for (size_t i = 0; i < arraysize(smis); i++) { // for header sizes
358 for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
359 int offset = static_cast<int>(i * sizeof(Smi*));
360 ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
361 MachineType::AnyTagged(), kNoWriteBarrier};
362
363 SimplifiedLoweringTester<Object*> t;
364 Node* load = t.LoadElement(access, t.PointerConstant(smis),
365 t.Int32Constant(static_cast<int>(j)));
366 t.Return(load);
367 t.LowerAllNodesAndLowerChanges();
368
369 for (int k = -5; k <= 5; k++) {
370 Smi* expected = Smi::FromInt(k);
371 smis[i + j] = expected;
372 CHECK_EQ(expected, t.Call());
373 }
374 }
375 }
376 }
377
378
379 TEST(RunStoreElementFromUntaggedBase) {
380 Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3),
381 Smi::FromInt(4), Smi::FromInt(5)};
382
383 for (size_t i = 0; i < arraysize(smis); i++) { // for header sizes
384 for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
385 int offset = static_cast<int>(i * sizeof(Smi*));
386 ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
387 MachineType::AnyTagged(), kNoWriteBarrier};
388
389 SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
390 Node* p0 = t.Parameter(0);
391 t.StoreElement(access, t.PointerConstant(smis),
392 t.Int32Constant(static_cast<int>(j)), p0);
393 t.Return(p0);
394 t.LowerAllNodesAndLowerChanges();
395
396 for (int k = -5; k <= 5; k++) {
397 Smi* expected = Smi::FromInt(k);
398 smis[i + j] = Smi::FromInt(-100);
399 CHECK_EQ(expected, t.Call(expected));
400 CHECK_EQ(expected, smis[i + j]);
401 }
402
403 // TODO(titzer): assert the contents of the array.
404 }
405 }
406 }
407
408
409 // A helper class for accessing fields and elements of various types, on both
410 // tagged and untagged base pointers. Contains both tagged and untagged buffers
411 // for testing direct memory access from generated code.
412 template <typename E>
413 class AccessTester : public HandleAndZoneScope {
414 public:
415 bool tagged;
416 MachineType rep;
417 E* original_elements;
418 size_t num_elements;
419 E* untagged_array;
420 Handle<ByteArray> tagged_array; // TODO(titzer): use FixedArray for tagged.
421
422 AccessTester(bool t, MachineType r, E* orig, size_t num)
423 : tagged(t),
424 rep(r),
425 original_elements(orig),
426 num_elements(num),
427 untagged_array(static_cast<E*>(malloc(ByteSize()))),
428 tagged_array(main_isolate()->factory()->NewByteArray(
429 static_cast<int>(ByteSize()))) {
430 Reinitialize();
431 }
432
433 ~AccessTester() { free(untagged_array); }
434
435 size_t ByteSize() { return num_elements * sizeof(E); }
436
437 // Nuke both {untagged_array} and {tagged_array} with {original_elements}.
438 void Reinitialize() {
439 memcpy(untagged_array, original_elements, ByteSize());
440 CHECK_EQ(static_cast<int>(ByteSize()), tagged_array->length());
441 E* raw = reinterpret_cast<E*>(tagged_array->GetDataStartAddress());
442 memcpy(raw, original_elements, ByteSize());
443 }
444
445 // Create and run code that copies the element in either {untagged_array}
446 // or {tagged_array} at index {from_index} to index {to_index}.
447 void RunCopyElement(int from_index, int to_index) {
448 // TODO(titzer): test element and field accesses where the base is not
449 // a constant in the code.
450 BoundsCheck(from_index);
451 BoundsCheck(to_index);
452 ElementAccess access = GetElementAccess();
453
454 SimplifiedLoweringTester<Object*> t;
455 Node* ptr = GetBaseNode(&t);
456 Node* load = t.LoadElement(access, ptr, t.Int32Constant(from_index));
457 t.StoreElement(access, ptr, t.Int32Constant(to_index), load);
458 t.Return(t.jsgraph.TrueConstant());
459 t.LowerAllNodesAndLowerChanges();
460 t.GenerateCode();
461
462 Object* result = t.Call();
463 CHECK_EQ(t.isolate()->heap()->true_value(), result);
464 }
465
466 // Create and run code that copies the field in either {untagged_array}
467 // or {tagged_array} at index {from_index} to index {to_index}.
468 void RunCopyField(int from_index, int to_index) {
469 BoundsCheck(from_index);
470 BoundsCheck(to_index);
471 FieldAccess from_access = GetFieldAccess(from_index);
472 FieldAccess to_access = GetFieldAccess(to_index);
473
474 SimplifiedLoweringTester<Object*> t;
475 Node* ptr = GetBaseNode(&t);
476 Node* load = t.LoadField(from_access, ptr);
477 t.StoreField(to_access, ptr, load);
478 t.Return(t.jsgraph.TrueConstant());
479 t.LowerAllNodesAndLowerChanges();
480 t.GenerateCode();
481
482 Object* result = t.Call();
483 CHECK_EQ(t.isolate()->heap()->true_value(), result);
484 }
485
486 // Create and run code that copies the elements from {this} to {that}.
487 void RunCopyElements(AccessTester<E>* that) {
488 // TODO(titzer): Rewrite this test without StructuredGraphBuilder support.
489 #if 0
490 SimplifiedLoweringTester<Object*> t;
491
492 Node* one = t.Int32Constant(1);
493 Node* index = t.Int32Constant(0);
494 Node* limit = t.Int32Constant(static_cast<int>(num_elements));
495 t.environment()->Push(index);
496 Node* src = this->GetBaseNode(&t);
497 Node* dst = that->GetBaseNode(&t);
498 {
499 LoopBuilder loop(&t);
500 loop.BeginLoop();
501 // Loop exit condition
502 index = t.environment()->Top();
503 Node* condition = t.Int32LessThan(index, limit);
504 loop.BreakUnless(condition);
505 // dst[index] = src[index]
506 index = t.environment()->Pop();
507 Node* load = t.LoadElement(this->GetElementAccess(), src, index);
508 t.StoreElement(that->GetElementAccess(), dst, index, load);
509 // index++
510 index = t.Int32Add(index, one);
511 t.environment()->Push(index);
512 // continue
513 loop.EndBody();
514 loop.EndLoop();
515 }
516 index = t.environment()->Pop();
517 t.Return(t.jsgraph.TrueConstant());
518 t.LowerAllNodes();
519 t.GenerateCode();
520
521 Object* result = t.Call();
522 CHECK_EQ(t.isolate()->heap()->true_value(), result);
523 #endif
524 }
525
526 E GetElement(int index) {
527 BoundsCheck(index);
528 if (tagged) {
529 return GetTaggedElement(index);
530 } else {
531 return untagged_array[index];
532 }
533 }
534
535 private:
536 ElementAccess GetElementAccess() {
537 ElementAccess access = {tagged ? kTaggedBase : kUntaggedBase,
538 tagged ? FixedArrayBase::kHeaderSize : 0,
539 Type::Any(), rep, kFullWriteBarrier};
540 return access;
541 }
542
543 FieldAccess GetFieldAccess(int field) {
544 int offset = field * sizeof(E);
545 FieldAccess access = {tagged ? kTaggedBase : kUntaggedBase,
546 offset + (tagged ? FixedArrayBase::kHeaderSize : 0),
547 Handle<Name>(),
548 Type::Any(),
549 rep,
550 kFullWriteBarrier};
551 return access;
552 }
553
554 template <typename T>
555 Node* GetBaseNode(SimplifiedLoweringTester<T>* t) {
556 return tagged ? t->HeapConstant(tagged_array)
557 : t->PointerConstant(untagged_array);
558 }
559
560 void BoundsCheck(int index) {
561 CHECK_GE(index, 0);
562 CHECK_LT(index, static_cast<int>(num_elements));
563 CHECK_EQ(static_cast<int>(ByteSize()), tagged_array->length());
564 }
565
566 E GetTaggedElement(int index) {
567 E* raw = reinterpret_cast<E*>(tagged_array->GetDataStartAddress());
568 return raw[index];
569 }
570 };
571
572 template <>
573 double AccessTester<double>::GetTaggedElement(int index) {
574 return ReadDoubleValue(tagged_array->GetDataStartAddress() +
575 index * sizeof(double));
576 }
577
578
579 template <typename E>
580 static void RunAccessTest(MachineType rep, E* original_elements, size_t num) {
581 int num_elements = static_cast<int>(num);
582
583 for (int taggedness = 0; taggedness < 2; taggedness++) {
584 AccessTester<E> a(taggedness == 1, rep, original_elements, num);
585 for (int field = 0; field < 2; field++) {
586 for (int i = 0; i < num_elements - 1; i++) {
587 a.Reinitialize();
588 if (field == 0) {
589 a.RunCopyField(i, i + 1); // Test field read/write.
590 } else {
591 a.RunCopyElement(i, i + 1); // Test element read/write.
592 }
593 for (int j = 0; j < num_elements; j++) {
594 E expect =
595 j == (i + 1) ? original_elements[i] : original_elements[j];
596 CHECK_EQ(expect, a.GetElement(j));
597 }
598 }
599 }
600 }
601 // Test array copy.
602 for (int tf = 0; tf < 2; tf++) {
603 for (int tt = 0; tt < 2; tt++) {
604 AccessTester<E> a(tf == 1, rep, original_elements, num);
605 AccessTester<E> b(tt == 1, rep, original_elements, num);
606 a.RunCopyElements(&b);
607 for (int i = 0; i < num_elements; i++) {
608 CHECK_EQ(a.GetElement(i), b.GetElement(i));
609 }
610 }
611 }
612 }
613
614
615 TEST(RunAccessTests_uint8) {
616 uint8_t data[] = {0x07, 0x16, 0x25, 0x34, 0x43, 0x99,
617 0xab, 0x78, 0x89, 0x19, 0x2b, 0x38};
618 RunAccessTest<uint8_t>(MachineType::Int8(), data, arraysize(data));
619 }
620
621
622 TEST(RunAccessTests_uint16) {
623 uint16_t data[] = {0x071a, 0x162b, 0x253c, 0x344d, 0x435e, 0x7777};
624 RunAccessTest<uint16_t>(MachineType::Int16(), data, arraysize(data));
625 }
626
627
628 TEST(RunAccessTests_int32) {
629 int32_t data[] = {-211, 211, 628347, 2000000000, -2000000000, -1, -100000034};
630 RunAccessTest<int32_t>(MachineType::Int32(), data, arraysize(data));
631 }
632
633
634 #define V8_2PART_INT64(a, b) (((static_cast<int64_t>(a) << 32) + 0x##b##u))
635
636
637 TEST(RunAccessTests_int64) {
638 if (kPointerSize != 8) return;
639 int64_t data[] = {V8_2PART_INT64(0x10111213, 14151617),
640 V8_2PART_INT64(0x20212223, 24252627),
641 V8_2PART_INT64(0x30313233, 34353637),
642 V8_2PART_INT64(0xa0a1a2a3, a4a5a6a7),
643 V8_2PART_INT64(0xf0f1f2f3, f4f5f6f7)};
644 RunAccessTest<int64_t>(MachineType::Int64(), data, arraysize(data));
645 }
646
647
648 TEST(RunAccessTests_float64) {
649 double data[] = {1.25, -1.25, 2.75, 11.0, 11100.8};
650 RunAccessTest<double>(MachineType::Float64(), data, arraysize(data));
651 }
652
653
654 TEST(RunAccessTests_Smi) {
655 Smi* data[] = {Smi::FromInt(-1), Smi::FromInt(-9),
656 Smi::FromInt(0), Smi::FromInt(666),
657 Smi::FromInt(77777), Smi::FromInt(Smi::kMaxValue)};
658 RunAccessTest<Smi*>(MachineType::AnyTagged(), data, arraysize(data));
659 }
660
661
662 TEST(RunAllocate) {
663 PretenureFlag flag[] = {NOT_TENURED, TENURED};
664
665 for (size_t i = 0; i < arraysize(flag); i++) {
666 SimplifiedLoweringTester<HeapObject*> t;
667 FieldAccess access = AccessBuilder::ForMap();
668 Node* size = t.jsgraph.Constant(HeapNumber::kSize);
669 Node* alloc = t.NewNode(t.simplified()->Allocate(flag[i]), size);
670 Node* map = t.jsgraph.Constant(t.factory()->heap_number_map());
671 t.StoreField(access, alloc, map);
672 t.Return(alloc);
673
674 t.LowerAllNodesAndLowerChanges();
675 t.GenerateCode();
676
677 HeapObject* result = t.CallWithPotentialGC<HeapObject>();
678 CHECK(t.heap()->new_space()->Contains(result) || flag[i] == TENURED);
679 CHECK(t.heap()->old_space()->Contains(result) || flag[i] == NOT_TENURED);
680 CHECK(result->IsHeapNumber());
681 }
682 }
683
684
685 // Fills in most of the nodes of the graph in order to make tests shorter.
686 class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
687 public:
688 Typer* typer = nullptr;
689 JSOperatorBuilder javascript;
690 JSGraph jsgraph;
691 Node* p0;
692 Node* p1;
693 Node* p2;
694 Node* start;
695 Node* end;
696 Node* ret;
697
698 explicit TestingGraph(Type* p0_type, Type* p1_type = Type::None(),
699 Type* p2_type = Type::None())
700 : GraphAndBuilders(main_zone()),
701 typer(new Typer(main_isolate(), graph())),
702 javascript(main_zone()),
703 jsgraph(main_isolate(), graph(), common(), &javascript, simplified(),
704 machine()) {
705 start = graph()->NewNode(common()->Start(4));
706 graph()->SetStart(start);
707 ret =
708 graph()->NewNode(common()->Return(), jsgraph.Constant(0), start, start);
709 end = graph()->NewNode(common()->End(1), ret);
710 graph()->SetEnd(end);
711 p0 = graph()->NewNode(common()->Parameter(0), start);
712 p1 = graph()->NewNode(common()->Parameter(1), start);
713 p2 = graph()->NewNode(common()->Parameter(2), start);
714 typer->Run();
715 NodeProperties::SetType(p0, p0_type);
716 NodeProperties::SetType(p1, p1_type);
717 NodeProperties::SetType(p2, p2_type);
718 }
719 ~TestingGraph() { delete typer; }
720
721 void CheckLoweringBinop(IrOpcode::Value expected, const Operator* op) {
722 Node* node = Return(graph()->NewNode(op, p0, p1));
723 Lower();
724 CHECK_EQ(expected, node->opcode());
725 }
726
727 void CheckLoweringStringBinop(IrOpcode::Value expected, const Operator* op) {
728 Node* node = Return(
729 graph()->NewNode(op, p0, p1, graph()->start(), graph()->start()));
730 Lower();
731 CHECK_EQ(expected, node->opcode());
732 }
733
734 void CheckLoweringTruncatedBinop(IrOpcode::Value expected, const Operator* op,
735 const Operator* trunc) {
736 Node* node = graph()->NewNode(op, p0, p1);
737 Return(graph()->NewNode(trunc, node));
738 Lower();
739 CHECK_EQ(expected, node->opcode());
740 }
741
742 void Lower() {
743 delete typer;
744 SourcePositionTable table(jsgraph.graph());
745 SimplifiedLowering(&jsgraph, jsgraph.zone(), &table).LowerAllNodes();
746 typer = new Typer(main_isolate(), graph());
747 }
748
749 void LowerAllNodesAndLowerChanges() {
750 delete typer;
751 SourcePositionTable table(jsgraph.graph());
752 SimplifiedLowering(&jsgraph, jsgraph.zone(), &table).LowerAllNodes();
753
754 Schedule* schedule = Scheduler::ComputeSchedule(this->zone(), this->graph(),
755 Scheduler::kNoFlags);
756 EffectControlLinearizer linearizer(&jsgraph, schedule, this->zone());
757 linearizer.Run();
758
759 MemoryOptimizer memory_optimizer(&jsgraph, this->zone());
760 memory_optimizer.Optimize();
761 typer = new Typer(main_isolate(), graph());
762 }
763
764 // Inserts the node as the return value of the graph.
765 Node* Return(Node* node) {
766 ret->ReplaceInput(0, node);
767 return node;
768 }
769
770 // Inserts the node as the effect input to the return of the graph.
771 void Effect(Node* node) { ret->ReplaceInput(1, node); }
772
773 Node* ExampleWithOutput(MachineType type) {
774 if (type.semantic() == MachineSemantic::kInt32) {
775 return graph()->NewNode(machine()->Int32Add(), jsgraph.Int32Constant(1),
776 jsgraph.Int32Constant(1));
777 } else if (type.semantic() == MachineSemantic::kUint32) {
778 return graph()->NewNode(machine()->Word32Shr(), jsgraph.Int32Constant(1),
779 jsgraph.Int32Constant(1));
780 } else if (type.representation() == MachineRepresentation::kFloat64) {
781 return graph()->NewNode(machine()->Float64Add(),
782 jsgraph.Float64Constant(1),
783 jsgraph.Float64Constant(1));
784 } else if (type.representation() == MachineRepresentation::kBit) {
785 return graph()->NewNode(machine()->Word32Equal(),
786 jsgraph.Int32Constant(1),
787 jsgraph.Int32Constant(1));
788 } else if (type.representation() == MachineRepresentation::kWord64) {
789 return graph()->NewNode(machine()->Int64Add(), Int64Constant(1),
790 Int64Constant(1));
791 } else {
792 CHECK(type.representation() == MachineRepresentation::kTagged);
793 return p0;
794 }
795 }
796
797 Node* Use(Node* node, MachineType type) {
798 if (type.semantic() == MachineSemantic::kInt32) {
799 return graph()->NewNode(machine()->Int32LessThan(), node,
800 jsgraph.Int32Constant(1));
801 } else if (type.semantic() == MachineSemantic::kUint32) {
802 return graph()->NewNode(machine()->Uint32LessThan(), node,
803 jsgraph.Int32Constant(1));
804 } else if (type.representation() == MachineRepresentation::kFloat64) {
805 return graph()->NewNode(machine()->Float64Add(), node,
806 jsgraph.Float64Constant(1));
807 } else if (type.representation() == MachineRepresentation::kWord64) {
808 return graph()->NewNode(machine()->Int64LessThan(), node,
809 Int64Constant(1));
810 } else if (type.representation() == MachineRepresentation::kWord32) {
811 return graph()->NewNode(machine()->Word32Equal(), node,
812 jsgraph.Int32Constant(1));
813 } else {
814 return graph()->NewNode(simplified()->ReferenceEqual(), node,
815 jsgraph.TrueConstant());
816 }
817 }
818
819 Node* Branch(Node* cond) {
820 Node* br = graph()->NewNode(common()->Branch(), cond, start);
821 Node* tb = graph()->NewNode(common()->IfTrue(), br);
822 Node* fb = graph()->NewNode(common()->IfFalse(), br);
823 Node* m = graph()->NewNode(common()->Merge(2), tb, fb);
824 NodeProperties::ReplaceControlInput(ret, m);
825 return br;
826 }
827
828 Node* Int64Constant(int64_t v) {
829 return graph()->NewNode(common()->Int64Constant(v));
830 }
831
832 SimplifiedOperatorBuilder* simplified() { return &main_simplified_; }
833 MachineOperatorBuilder* machine() { return &main_machine_; }
834 CommonOperatorBuilder* common() { return &main_common_; }
835 Graph* graph() { return main_graph_; }
836 };
837
838
839 TEST(LowerBooleanNot_bit_bit) {
840 // BooleanNot(x: kRepBit) used as kRepBit
841 TestingGraph t(Type::Boolean());
842 Node* b = t.ExampleWithOutput(MachineType::Bool());
843 Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
844 Node* use = t.Branch(inv);
845 t.Lower();
846 Node* cmp = use->InputAt(0);
847 CHECK_EQ(t.machine()->Word32Equal()->opcode(), cmp->opcode());
848 CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
849 Node* f = t.jsgraph.Int32Constant(0);
850 CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
851 }
852
853
854 TEST(LowerBooleanNot_bit_tagged) {
855 // BooleanNot(x: kRepBit) used as kRepTagged
856 TestingGraph t(Type::Boolean());
857 Node* b = t.ExampleWithOutput(MachineType::Bool());
858 Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
859 Node* use = t.Use(inv, MachineType::AnyTagged());
860 t.Return(use);
861 t.Lower();
862 CHECK_EQ(IrOpcode::kChangeBitToTagged, use->InputAt(0)->opcode());
863 Node* cmp = use->InputAt(0)->InputAt(0);
864 CHECK_EQ(t.machine()->Word32Equal()->opcode(), cmp->opcode());
865 CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
866 Node* f = t.jsgraph.Int32Constant(0);
867 CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
868 }
869
870
871 TEST(LowerBooleanNot_tagged_bit) {
872 // BooleanNot(x: kRepTagged) used as kRepBit
873 TestingGraph t(Type::Boolean());
874 Node* b = t.p0;
875 Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
876 Node* use = t.Branch(inv);
877 t.Lower();
878 Node* cmp = use->InputAt(0);
879 CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
880 CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
881 Node* f = t.jsgraph.FalseConstant();
882 CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
883 }
884
885
886 TEST(LowerBooleanNot_tagged_tagged) {
887 // BooleanNot(x: kRepTagged) used as kRepTagged
888 TestingGraph t(Type::Boolean());
889 Node* b = t.p0;
890 Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
891 Node* use = t.Use(inv, MachineType::AnyTagged());
892 t.Return(use);
893 t.Lower();
894 CHECK_EQ(IrOpcode::kChangeBitToTagged, use->InputAt(0)->opcode());
895 Node* cmp = use->InputAt(0)->InputAt(0);
896 CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
897 CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
898 Node* f = t.jsgraph.FalseConstant();
899 CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
900 }
901
902 static Type* test_types[] = {Type::Signed32(), Type::Unsigned32(),
903 Type::Number()};
904
905 TEST(LowerNumberCmp_to_int32) {
906 TestingGraph t(Type::Signed32(), Type::Signed32());
907
908 t.CheckLoweringBinop(IrOpcode::kWord32Equal, t.simplified()->NumberEqual());
909 t.CheckLoweringBinop(IrOpcode::kInt32LessThan,
910 t.simplified()->NumberLessThan());
911 t.CheckLoweringBinop(IrOpcode::kInt32LessThanOrEqual,
912 t.simplified()->NumberLessThanOrEqual());
913 }
914
915
916 TEST(LowerNumberCmp_to_uint32) {
917 TestingGraph t(Type::Unsigned32(), Type::Unsigned32());
918
919 t.CheckLoweringBinop(IrOpcode::kWord32Equal, t.simplified()->NumberEqual());
920 t.CheckLoweringBinop(IrOpcode::kUint32LessThan,
921 t.simplified()->NumberLessThan());
922 t.CheckLoweringBinop(IrOpcode::kUint32LessThanOrEqual,
923 t.simplified()->NumberLessThanOrEqual());
924 }
925
926
927 TEST(LowerNumberCmp_to_float64) {
928 TestingGraph t(Type::Number(), Type::Number());
929
930 t.CheckLoweringBinop(IrOpcode::kFloat64Equal, t.simplified()->NumberEqual());
931 t.CheckLoweringBinop(IrOpcode::kFloat64LessThan,
932 t.simplified()->NumberLessThan());
933 t.CheckLoweringBinop(IrOpcode::kFloat64LessThanOrEqual,
934 t.simplified()->NumberLessThanOrEqual());
935 }
936
937
938 TEST(LowerNumberAddSub_to_int32) {
939 HandleAndZoneScope scope;
940 Type* small_range = Type::Range(1, 10, scope.main_zone());
941 Type* large_range = Type::Range(-1e+13, 1e+14, scope.main_zone());
942 static Type* types[] = {Type::Signed32(), Type::Integral32(), small_range,
943 large_range};
944
945 for (size_t i = 0; i < arraysize(types); i++) {
946 for (size_t j = 0; j < arraysize(types); j++) {
947 TestingGraph t(types[i], types[j]);
948 t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Add,
949 t.simplified()->NumberAdd(),
950 t.simplified()->NumberToInt32());
951 t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Sub,
952 t.simplified()->NumberSubtract(),
953 t.simplified()->NumberToInt32());
954 }
955 }
956 }
957
958
959 TEST(LowerNumberAddSub_to_uint32) {
960 HandleAndZoneScope scope;
961 Type* small_range = Type::Range(1, 10, scope.main_zone());
962 Type* large_range = Type::Range(-1e+13, 1e+14, scope.main_zone());
963 static Type* types[] = {Type::Signed32(), Type::Integral32(), small_range,
964 large_range};
965
966 for (size_t i = 0; i < arraysize(types); i++) {
967 for (size_t j = 0; j < arraysize(types); j++) {
968 TestingGraph t(types[i], types[j]);
969 t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Add,
970 t.simplified()->NumberAdd(),
971 t.simplified()->NumberToUint32());
972 t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Sub,
973 t.simplified()->NumberSubtract(),
974 t.simplified()->NumberToUint32());
975 }
976 }
977 }
978
979
980 TEST(LowerNumberAddSub_to_float64) {
981 for (size_t i = 0; i < arraysize(test_types); i++) {
982 TestingGraph t(test_types[i], test_types[i]);
983
984 t.CheckLoweringBinop(IrOpcode::kFloat64Add, t.simplified()->NumberAdd());
985 t.CheckLoweringBinop(IrOpcode::kFloat64Sub,
986 t.simplified()->NumberSubtract());
987 }
988 }
989
990
991 TEST(LowerNumberDivMod_to_float64) {
992 for (size_t i = 0; i < arraysize(test_types); i++) {
993 TestingGraph t(test_types[i], test_types[i]);
994
995 t.CheckLoweringBinop(IrOpcode::kFloat64Div, t.simplified()->NumberDivide());
996 if (!test_types[i]->Is(Type::Unsigned32())) {
997 t.CheckLoweringBinop(IrOpcode::kFloat64Mod,
998 t.simplified()->NumberModulus());
999 }
1000 }
1001 }
1002
1003
1004 static void CheckChangeOf(IrOpcode::Value change, Node* of, Node* node) {
1005 CHECK_EQ(change, node->opcode());
1006 CHECK_EQ(of, node->InputAt(0));
1007 }
1008
1009
1010 TEST(LowerNumberToInt32_to_ChangeTaggedToInt32) {
1011 // NumberToInt32(x: kRepTagged | kTypeInt32) used as kRepWord32
1012 TestingGraph t(Type::Signed32());
1013 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
1014 Node* use = t.Use(trunc, MachineType::Int32());
1015 t.Return(use);
1016 t.Lower();
1017 CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p0, use->InputAt(0));
1018 }
1019
1020 TEST(LowerNumberToInt32_to_TruncateFloat64ToWord32) {
1021 // NumberToInt32(x: kRepFloat64) used as MachineType::Int32()
1022 TestingGraph t(Type::Number());
1023 Node* p0 = t.ExampleWithOutput(MachineType::Float64());
1024 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), p0);
1025 Node* use = t.Use(trunc, MachineType::Int32());
1026 t.Return(use);
1027 t.Lower();
1028 CheckChangeOf(IrOpcode::kTruncateFloat64ToWord32, p0, use->InputAt(0));
1029 }
1030
1031 TEST(LowerNumberToInt32_to_TruncateTaggedToWord32) {
1032 // NumberToInt32(x: kTypeNumber | kRepTagged) used as MachineType::Int32()
1033 TestingGraph t(Type::Number());
1034 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
1035 Node* use = t.Use(trunc, MachineType::Int32());
1036 t.Return(use);
1037 t.Lower();
1038 CheckChangeOf(IrOpcode::kTruncateTaggedToWord32, t.p0, use->InputAt(0));
1039 }
1040
1041
1042 TEST(LowerNumberToUint32_to_ChangeTaggedToUint32) {
1043 // NumberToUint32(x: kRepTagged | kTypeUint32) used as kRepWord32
1044 TestingGraph t(Type::Unsigned32());
1045 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
1046 Node* use = t.Use(trunc, MachineType::Uint32());
1047 t.Return(use);
1048 t.Lower();
1049 CheckChangeOf(IrOpcode::kChangeTaggedToUint32, t.p0, use->InputAt(0));
1050 }
1051
1052 TEST(LowerNumberToUint32_to_TruncateFloat64ToWord32) {
1053 // NumberToUint32(x: kRepFloat64) used as MachineType::Uint32()
1054 TestingGraph t(Type::Number());
1055 Node* p0 = t.ExampleWithOutput(MachineType::Float64());
1056 // TODO(titzer): run the typer here, or attach machine type to param.
1057 NodeProperties::SetType(p0, Type::Number());
1058 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), p0);
1059 Node* use = t.Use(trunc, MachineType::Uint32());
1060 t.Return(use);
1061 t.Lower();
1062 CheckChangeOf(IrOpcode::kTruncateFloat64ToWord32, p0, use->InputAt(0));
1063 }
1064
1065 TEST(LowerNumberToUint32_to_TruncateTaggedToWord32) {
1066 // NumberToUint32(x: kTypeNumber | kRepTagged) used as MachineType::Uint32()
1067 TestingGraph t(Type::Number());
1068 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
1069 Node* use = t.Use(trunc, MachineType::Uint32());
1070 t.Return(use);
1071 t.Lower();
1072 CheckChangeOf(IrOpcode::kTruncateTaggedToWord32, t.p0, use->InputAt(0));
1073 }
1074
1075 TEST(LowerNumberToUint32_to_TruncateFloat64ToWord32_uint32) {
1076 // NumberToUint32(x: kRepFloat64) used as kRepWord32
1077 TestingGraph t(Type::Unsigned32());
1078 Node* input = t.ExampleWithOutput(MachineType::Float64());
1079 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), input);
1080 Node* use = t.Use(trunc, MachineType::RepWord32());
1081 t.Return(use);
1082 t.Lower();
1083 CheckChangeOf(IrOpcode::kTruncateFloat64ToWord32, input, use->InputAt(0));
1084 }
1085
1086
1087 TEST(LowerReferenceEqual_to_wordeq) {
1088 TestingGraph t(Type::Any(), Type::Any());
1089 IrOpcode::Value opcode =
1090 static_cast<IrOpcode::Value>(t.machine()->WordEqual()->opcode());
1091 t.CheckLoweringBinop(opcode, t.simplified()->ReferenceEqual());
1092 }
1093
1094 void CheckChangeInsertion(IrOpcode::Value expected, MachineType from,
1095 MachineType to, Type* type = Type::Any()) {
1096 TestingGraph t(Type::Any());
1097 Node* in = t.ExampleWithOutput(from);
1098 NodeProperties::SetType(in, type);
1099 Node* use = t.Use(in, to);
1100 t.Return(use);
1101 t.Lower();
1102 CHECK_EQ(expected, use->InputAt(0)->opcode());
1103 CHECK_EQ(in, use->InputAt(0)->InputAt(0));
1104 }
1105
1106 TEST(InsertBasicChanges) {
1107 CheckChangeInsertion(IrOpcode::kChangeFloat64ToInt32, MachineType::Float64(),
1108 MachineType::Int32(), Type::Signed32());
1109 CheckChangeInsertion(IrOpcode::kChangeFloat64ToUint32, MachineType::Float64(),
1110 MachineType::Uint32(), Type::Unsigned32());
1111 CheckChangeInsertion(IrOpcode::kTruncateFloat64ToWord32,
1112 MachineType::Float64(), MachineType::Uint32(),
1113 Type::Integral32());
1114 CheckChangeInsertion(IrOpcode::kChangeTaggedToInt32, MachineType::AnyTagged(),
1115 MachineType::Int32(), Type::Signed32());
1116 CheckChangeInsertion(IrOpcode::kChangeTaggedToUint32,
1117 MachineType::AnyTagged(), MachineType::Uint32(),
1118 Type::Unsigned32());
1119
1120 CheckChangeInsertion(IrOpcode::kChangeFloat64ToTagged, MachineType::Float64(),
1121 MachineType::AnyTagged(), Type::Number());
1122 CheckChangeInsertion(IrOpcode::kChangeTaggedToFloat64,
1123 MachineType::AnyTagged(), MachineType::Float64(),
1124 Type::Number());
1125
1126 CheckChangeInsertion(IrOpcode::kChangeInt32ToFloat64, MachineType::Int32(),
1127 MachineType::Float64(), Type::Signed32());
1128 CheckChangeInsertion(IrOpcode::kChangeInt32ToTagged, MachineType::Int32(),
1129 MachineType::AnyTagged(), Type::Signed32());
1130
1131 CheckChangeInsertion(IrOpcode::kChangeUint32ToFloat64, MachineType::Uint32(),
1132 MachineType::Float64(), Type::Unsigned32());
1133 CheckChangeInsertion(IrOpcode::kChangeUint32ToTagged, MachineType::Uint32(),
1134 MachineType::AnyTagged(), Type::Unsigned32());
1135 }
1136
1137 static void CheckChangesAroundBinop(TestingGraph* t, const Operator* op,
1138 IrOpcode::Value input_change,
1139 IrOpcode::Value output_change, Type* type) {
1140 Node* binop =
1141 op->ControlInputCount() == 0
1142 ? t->graph()->NewNode(op, t->p0, t->p1)
1143 : t->graph()->NewNode(op, t->p0, t->p1, t->graph()->start());
1144 NodeProperties::SetType(binop, type);
1145 t->Return(binop);
1146 t->Lower();
1147 CHECK_EQ(input_change, binop->InputAt(0)->opcode());
1148 CHECK_EQ(input_change, binop->InputAt(1)->opcode());
1149 CHECK_EQ(t->p0, binop->InputAt(0)->InputAt(0));
1150 CHECK_EQ(t->p1, binop->InputAt(1)->InputAt(0));
1151 CHECK_EQ(output_change, t->ret->InputAt(0)->opcode());
1152 CHECK_EQ(binop, t->ret->InputAt(0)->InputAt(0));
1153 }
1154
1155
1156 TEST(InsertChangesAroundInt32Binops) {
1157 TestingGraph t(Type::Signed32(), Type::Signed32());
1158
1159 const Operator* ops[] = {t.machine()->Int32Add(), t.machine()->Int32Sub(),
1160 t.machine()->Int32Mul(), t.machine()->Int32Div(),
1161 t.machine()->Int32Mod(), t.machine()->Word32And(),
1162 t.machine()->Word32Or(), t.machine()->Word32Xor(),
1163 t.machine()->Word32Shl(), t.machine()->Word32Sar()};
1164
1165 for (size_t i = 0; i < arraysize(ops); i++) {
1166 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
1167 IrOpcode::kChangeInt32ToTagged, Type::Signed32());
1168 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
1169 IrOpcode::kChangeInt32ToTagged, Type::Signed32());
1170 }
1171 }
1172
1173
1174 TEST(InsertChangesAroundInt32Cmp) {
1175 TestingGraph t(Type::Signed32(), Type::Signed32());
1176
1177 const Operator* ops[] = {t.machine()->Int32LessThan(),
1178 t.machine()->Int32LessThanOrEqual()};
1179
1180 for (size_t i = 0; i < arraysize(ops); i++) {
1181 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
1182 IrOpcode::kChangeBitToTagged, Type::Boolean());
1183 }
1184 }
1185
1186
1187 TEST(InsertChangesAroundUint32Cmp) {
1188 TestingGraph t(Type::Unsigned32(), Type::Unsigned32());
1189
1190 const Operator* ops[] = {t.machine()->Uint32LessThan(),
1191 t.machine()->Uint32LessThanOrEqual()};
1192
1193 for (size_t i = 0; i < arraysize(ops); i++) {
1194 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToUint32,
1195 IrOpcode::kChangeBitToTagged, Type::Boolean());
1196 }
1197 }
1198
1199
1200 TEST(InsertChangesAroundFloat64Binops) {
1201 TestingGraph t(Type::Number(), Type::Number());
1202
1203 const Operator* ops[] = {
1204 t.machine()->Float64Add(), t.machine()->Float64Sub(),
1205 t.machine()->Float64Mul(), t.machine()->Float64Div(),
1206 t.machine()->Float64Mod(),
1207 };
1208
1209 for (size_t i = 0; i < arraysize(ops); i++) {
1210 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToFloat64,
1211 IrOpcode::kChangeFloat64ToTagged, Type::Number());
1212 }
1213 }
1214
1215
1216 TEST(InsertChangesAroundFloat64Cmp) {
1217 TestingGraph t(Type::Number(), Type::Number());
1218
1219 const Operator* ops[] = {t.machine()->Float64Equal(),
1220 t.machine()->Float64LessThan(),
1221 t.machine()->Float64LessThanOrEqual()};
1222
1223 for (size_t i = 0; i < arraysize(ops); i++) {
1224 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToFloat64,
1225 IrOpcode::kChangeBitToTagged, Type::Boolean());
1226 }
1227 }
1228
1229
1230 namespace {
1231
1232 void CheckFieldAccessArithmetic(FieldAccess access, Node* load_or_store) {
1233 IntPtrMatcher mindex(load_or_store->InputAt(1));
1234 CHECK(mindex.Is(access.offset - access.tag()));
1235 }
1236
1237
1238 Node* CheckElementAccessArithmetic(ElementAccess access, Node* load_or_store) {
1239 Node* index = load_or_store->InputAt(1);
1240 if (kPointerSize == 8) {
1241 Int64BinopMatcher mindex(index);
1242 CHECK_EQ(IrOpcode::kInt64Add, mindex.node()->opcode());
1243 CHECK(mindex.right().Is(access.header_size - access.tag()));
1244
1245 const int element_size_shift =
1246 ElementSizeLog2Of(access.machine_type.representation());
1247 Node* index;
1248 if (element_size_shift) {
1249 Int64BinopMatcher shl(mindex.left().node());
1250 CHECK_EQ(IrOpcode::kWord64Shl, shl.node()->opcode());
1251 CHECK(shl.right().Is(element_size_shift));
1252 index = shl.left().node();
1253 } else {
1254 index = mindex.left().node();
1255 }
1256 CHECK_EQ(IrOpcode::kChangeUint32ToUint64, index->opcode());
1257 return index->InputAt(0);
1258 } else {
1259 Int32BinopMatcher mindex(index);
1260 CHECK_EQ(IrOpcode::kInt32Add, mindex.node()->opcode());
1261 CHECK(mindex.right().Is(access.header_size - access.tag()));
1262
1263 const int element_size_shift =
1264 ElementSizeLog2Of(access.machine_type.representation());
1265 if (element_size_shift) {
1266 Int32BinopMatcher shl(mindex.left().node());
1267 CHECK_EQ(IrOpcode::kWord32Shl, shl.node()->opcode());
1268 CHECK(shl.right().Is(element_size_shift));
1269 return shl.left().node();
1270 } else {
1271 return mindex.left().node();
1272 }
1273 }
1274 }
1275
1276
1277 const MachineType kMachineReps[] = {
1278 MachineType::Int8(), MachineType::Int16(), MachineType::Int32(),
1279 MachineType::Uint32(), MachineType::Int64(), MachineType::Float64(),
1280 MachineType::AnyTagged()};
1281
1282 } // namespace
1283
1284
1285 TEST(LowerLoadField_to_load) {
1286 for (size_t i = 0; i < arraysize(kMachineReps); i++) {
1287 TestingGraph t(Type::Any(), Type::Signed32());
1288 FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1289 Handle<Name>::null(), Type::Any(),
1290 kMachineReps[i], kNoWriteBarrier};
1291
1292 Node* load = t.graph()->NewNode(t.simplified()->LoadField(access), t.p0,
1293 t.start, t.start);
1294 Node* use = t.Use(load, kMachineReps[i]);
1295 t.Return(use);
1296 t.LowerAllNodesAndLowerChanges();
1297 CHECK_EQ(IrOpcode::kLoad, load->opcode());
1298 CHECK_EQ(t.p0, load->InputAt(0));
1299 CheckFieldAccessArithmetic(access, load);
1300
1301 MachineType rep = LoadRepresentationOf(load->op());
1302 CHECK_EQ(kMachineReps[i], rep);
1303 }
1304 }
1305
1306
1307 TEST(LowerStoreField_to_store) {
1308 {
1309 TestingGraph t(Type::Any(), Type::Signed32());
1310
1311 for (size_t i = 0; i < arraysize(kMachineReps); i++) {
1312 FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1313 Handle<Name>::null(), Type::Any(),
1314 kMachineReps[i], kNoWriteBarrier};
1315
1316 Node* val = t.ExampleWithOutput(kMachineReps[i]);
1317 Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
1318 val, t.start, t.start);
1319 t.Effect(store);
1320 t.LowerAllNodesAndLowerChanges();
1321 CHECK_EQ(IrOpcode::kStore, store->opcode());
1322 CHECK_EQ(val, store->InputAt(2));
1323 CheckFieldAccessArithmetic(access, store);
1324
1325 StoreRepresentation rep = StoreRepresentationOf(store->op());
1326 if (kMachineReps[i].representation() == MachineRepresentation::kTagged) {
1327 CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
1328 }
1329 CHECK_EQ(kMachineReps[i].representation(), rep.representation());
1330 }
1331 }
1332 {
1333 HandleAndZoneScope scope;
1334 Zone* z = scope.main_zone();
1335 TestingGraph t(Type::Any(), Type::Intersect(Type::SignedSmall(),
1336 Type::TaggedSigned(), z));
1337 FieldAccess access = {
1338 kTaggedBase, FixedArrayBase::kHeaderSize, Handle<Name>::null(),
1339 Type::Any(), MachineType::AnyTagged(), kNoWriteBarrier};
1340 Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
1341 t.p1, t.start, t.start);
1342 t.Effect(store);
1343 t.LowerAllNodesAndLowerChanges();
1344 CHECK_EQ(IrOpcode::kStore, store->opcode());
1345 CHECK_EQ(t.p1, store->InputAt(2));
1346 StoreRepresentation rep = StoreRepresentationOf(store->op());
1347 CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
1348 }
1349 }
1350
1351
1352 TEST(LowerLoadElement_to_load) {
1353 for (size_t i = 0; i < arraysize(kMachineReps); i++) {
1354 TestingGraph t(Type::Any(), Type::Signed32());
1355 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1356 Type::Any(), kMachineReps[i], kNoWriteBarrier};
1357
1358 Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
1359 t.p1, t.start, t.start);
1360 Node* use = t.Use(load, kMachineReps[i]);
1361 t.Return(use);
1362 t.LowerAllNodesAndLowerChanges();
1363 CHECK_EQ(IrOpcode::kLoad, load->opcode());
1364 CHECK_EQ(t.p0, load->InputAt(0));
1365 CheckElementAccessArithmetic(access, load);
1366
1367 MachineType rep = LoadRepresentationOf(load->op());
1368 CHECK_EQ(kMachineReps[i], rep);
1369 }
1370 }
1371
1372
1373 TEST(LowerStoreElement_to_store) {
1374 {
1375 for (size_t i = 0; i < arraysize(kMachineReps); i++) {
1376 TestingGraph t(Type::Any(), Type::Signed32());
1377
1378 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1379 Type::Any(), kMachineReps[i], kNoWriteBarrier};
1380
1381 Node* val = t.ExampleWithOutput(kMachineReps[i]);
1382 Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access),
1383 t.p0, t.p1, val, t.start, t.start);
1384 t.Effect(store);
1385 t.LowerAllNodesAndLowerChanges();
1386 CHECK_EQ(IrOpcode::kStore, store->opcode());
1387 CHECK_EQ(val, store->InputAt(2));
1388 CheckElementAccessArithmetic(access, store);
1389
1390 StoreRepresentation rep = StoreRepresentationOf(store->op());
1391 if (kMachineReps[i].representation() == MachineRepresentation::kTagged) {
1392 CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
1393 }
1394 CHECK_EQ(kMachineReps[i].representation(), rep.representation());
1395 }
1396 }
1397 {
1398 HandleAndZoneScope scope;
1399 Zone* z = scope.main_zone();
1400 TestingGraph t(
1401 Type::Any(), Type::Signed32(),
1402 Type::Intersect(Type::SignedSmall(), Type::TaggedSigned(), z));
1403 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1404 Type::Any(), MachineType::AnyTagged(),
1405 kNoWriteBarrier};
1406 Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
1407 t.p1, t.p2, t.start, t.start);
1408 t.Effect(store);
1409 t.LowerAllNodesAndLowerChanges();
1410 CHECK_EQ(IrOpcode::kStore, store->opcode());
1411 CHECK_EQ(t.p2, store->InputAt(2));
1412 StoreRepresentation rep = StoreRepresentationOf(store->op());
1413 CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
1414 }
1415 }
1416
1417
1418 TEST(InsertChangeForLoadElementIndex) {
1419 // LoadElement(obj: Tagged, index: kTypeInt32 | kRepTagged, length) =>
1420 // Load(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k))
1421 TestingGraph t(Type::Any(), Type::Signed32());
1422 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
1423 MachineType::AnyTagged(), kNoWriteBarrier};
1424
1425 Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
1426 t.p1, t.start, t.start);
1427 t.Return(load);
1428 t.Lower();
1429 CHECK_EQ(IrOpcode::kLoadElement, load->opcode());
1430 CHECK_EQ(t.p0, load->InputAt(0));
1431 CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p1, load->InputAt(1));
1432 }
1433
1434
1435 TEST(InsertChangeForStoreElementIndex) {
1436 // StoreElement(obj: Tagged, index: kTypeInt32 | kRepTagged, length, val) =>
1437 // Store(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k), val)
1438 TestingGraph t(Type::Any(), Type::Signed32());
1439 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
1440 MachineType::AnyTagged(), kFullWriteBarrier};
1441
1442 Node* store =
1443 t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0, t.p1,
1444 t.jsgraph.TrueConstant(), t.start, t.start);
1445 t.Effect(store);
1446 t.Lower();
1447 CHECK_EQ(IrOpcode::kStoreElement, store->opcode());
1448 CHECK_EQ(t.p0, store->InputAt(0));
1449 CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p1, store->InputAt(1));
1450 }
1451
1452
1453 TEST(InsertChangeForLoadElement) {
1454 // TODO(titzer): test all load/store representation change insertions.
1455 TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
1456 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1457 Type::Number(), MachineType::Float64(),
1458 kNoWriteBarrier};
1459
1460 Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
1461 t.p1, t.start, t.start);
1462 t.Return(load);
1463 t.Lower();
1464 CHECK_EQ(IrOpcode::kLoadElement, load->opcode());
1465 CHECK_EQ(t.p0, load->InputAt(0));
1466 CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, load, t.ret->InputAt(0));
1467 }
1468
1469
1470 TEST(InsertChangeForLoadField) {
1471 // TODO(titzer): test all load/store representation change insertions.
1472 TestingGraph t(Type::Any(), Type::Signed32());
1473 FieldAccess access = {
1474 kTaggedBase, FixedArrayBase::kHeaderSize, Handle<Name>::null(),
1475 Type::Number(), MachineType::Float64(), kNoWriteBarrier};
1476
1477 Node* load = t.graph()->NewNode(t.simplified()->LoadField(access), t.p0,
1478 t.start, t.start);
1479 t.Return(load);
1480 t.Lower();
1481 CHECK_EQ(IrOpcode::kLoadField, load->opcode());
1482 CHECK_EQ(t.p0, load->InputAt(0));
1483 CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, load, t.ret->InputAt(0));
1484 }
1485
1486
1487 TEST(InsertChangeForStoreElement) {
1488 // TODO(titzer): test all load/store representation change insertions.
1489 TestingGraph t(Type::Any(), Type::Signed32());
1490 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
1491 MachineType::Float64(), kFullWriteBarrier};
1492
1493 Node* store =
1494 t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
1495 t.jsgraph.Int32Constant(0), t.p1, t.start, t.start);
1496 t.Effect(store);
1497 t.Lower();
1498
1499 CHECK_EQ(IrOpcode::kStoreElement, store->opcode());
1500 CHECK_EQ(t.p0, store->InputAt(0));
1501 CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p1, store->InputAt(2));
1502 }
1503
1504
1505 TEST(InsertChangeForStoreField) {
1506 // TODO(titzer): test all load/store representation change insertions.
1507 TestingGraph t(Type::Any(), Type::Signed32());
1508 FieldAccess access = {
1509 kTaggedBase, FixedArrayBase::kHeaderSize, Handle<Name>::null(),
1510 Type::Any(), MachineType::Float64(), kNoWriteBarrier};
1511
1512 Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
1513 t.p1, t.start, t.start);
1514 t.Effect(store);
1515 t.Lower();
1516
1517 CHECK_EQ(IrOpcode::kStoreField, store->opcode());
1518 CHECK_EQ(t.p0, store->InputAt(0));
1519 CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p1, store->InputAt(1));
1520 }
1521
1522
1523 TEST(UpdatePhi) {
1524 TestingGraph t(Type::Any(), Type::Signed32());
1525 static const MachineType kMachineTypes[] = {
1526 MachineType::Int32(), MachineType::Uint32(), MachineType::Float64()};
1527 Type* kTypes[] = {Type::Signed32(), Type::Unsigned32(), Type::Number()};
1528
1529 for (size_t i = 0; i < arraysize(kMachineTypes); i++) {
1530 FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1531 Handle<Name>::null(), kTypes[i],
1532 kMachineTypes[i], kFullWriteBarrier};
1533
1534 Node* load0 = t.graph()->NewNode(t.simplified()->LoadField(access), t.p0,
1535 t.start, t.start);
1536 Node* load1 = t.graph()->NewNode(t.simplified()->LoadField(access), t.p1,
1537 t.start, t.start);
1538 Node* phi =
1539 t.graph()->NewNode(t.common()->Phi(MachineRepresentation::kTagged, 2),
1540 load0, load1, t.start);
1541 t.Return(t.Use(phi, kMachineTypes[i]));
1542 t.Lower();
1543
1544 CHECK_EQ(IrOpcode::kPhi, phi->opcode());
1545 CHECK_EQ(kMachineTypes[i].representation(), PhiRepresentationOf(phi->op()));
1546 }
1547 }
1548
1549
1550 TEST(NumberMultiply_ConstantOutOfRange) {
1551 TestingGraph t(Type::Signed32());
1552 Node* k = t.jsgraph.Constant(1000000023);
1553 Node* mul = t.graph()->NewNode(t.simplified()->NumberMultiply(), t.p0, k);
1554 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), mul);
1555 t.Return(trunc);
1556 t.Lower();
1557
1558 CHECK_EQ(IrOpcode::kFloat64Mul, mul->opcode());
1559 }
1560
1561
1562 TEST(NumberMultiply_NonTruncating) {
1563 TestingGraph t(Type::Signed32());
1564 Node* k = t.jsgraph.Constant(111);
1565 Node* mul = t.graph()->NewNode(t.simplified()->NumberMultiply(), t.p0, k);
1566 t.Return(mul);
1567 t.Lower();
1568
1569 CHECK_EQ(IrOpcode::kFloat64Mul, mul->opcode());
1570 }
1571
1572
1573 TEST(NumberDivide_TruncatingToInt32) {
1574 int32_t constants[] = {-100, -10, 1, 4, 100, 1000};
1575
1576 for (size_t i = 0; i < arraysize(constants); i++) {
1577 TestingGraph t(Type::Signed32());
1578 Node* k = t.jsgraph.Constant(constants[i]);
1579 Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
1580 Node* use = t.Use(div, MachineType::Int32());
1581 t.Return(use);
1582 t.Lower();
1583
1584 CHECK_EQ(IrOpcode::kInt32Div, use->InputAt(0)->opcode());
1585 }
1586 }
1587
1588
1589 TEST(NumberDivide_TruncatingToUint32) {
1590 double constants[] = {1, 3, 100, 1000, 100998348};
1591
1592 for (size_t i = 0; i < arraysize(constants); i++) {
1593 TestingGraph t(Type::Unsigned32());
1594 Node* k = t.jsgraph.Constant(constants[i]);
1595 Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
1596 Node* use = t.Use(div, MachineType::Uint32());
1597 t.Return(use);
1598 t.Lower();
1599
1600 CHECK_EQ(IrOpcode::kUint32Div, use->InputAt(0)->opcode());
1601 }
1602 }
1603
1604
1605 TEST(NumberDivide_BadConstants) {
1606 {
1607 TestingGraph t(Type::Signed32());
1608 Node* k = t.jsgraph.Constant(-1);
1609 Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
1610 Node* use = t.Use(div, MachineType::Int32());
1611 t.Return(use);
1612 t.Lower();
1613
1614 CHECK_EQ(IrOpcode::kInt32Sub, use->InputAt(0)->opcode());
1615 }
1616
1617 {
1618 TestingGraph t(Type::Signed32());
1619 Node* k = t.jsgraph.Constant(0);
1620 Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
1621 Node* use = t.Use(div, MachineType::Int32());
1622 t.Return(use);
1623 t.Lower();
1624
1625 CHECK_EQ(IrOpcode::kInt32Constant, use->InputAt(0)->opcode());
1626 CHECK_EQ(0, OpParameter<int32_t>(use->InputAt(0)));
1627 }
1628
1629 {
1630 TestingGraph t(Type::Unsigned32());
1631 Node* k = t.jsgraph.Constant(0);
1632 Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
1633 Node* use = t.Use(div, MachineType::Uint32());
1634 t.Return(use);
1635 t.Lower();
1636
1637 CHECK_EQ(IrOpcode::kInt32Constant, use->InputAt(0)->opcode());
1638 CHECK_EQ(0, OpParameter<int32_t>(use->InputAt(0)));
1639 }
1640 }
1641
1642
1643 TEST(NumberModulus_TruncatingToInt32) {
1644 int32_t constants[] = {-100, -10, 1, 4, 100, 1000};
1645
1646 for (size_t i = 0; i < arraysize(constants); i++) {
1647 TestingGraph t(Type::Signed32());
1648 Node* k = t.jsgraph.Constant(constants[i]);
1649 Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
1650 Node* use = t.Use(mod, MachineType::Int32());
1651 t.Return(use);
1652 t.Lower();
1653
1654 CHECK_EQ(IrOpcode::kInt32Mod, use->InputAt(0)->opcode());
1655 }
1656 }
1657
1658
1659 TEST(NumberModulus_TruncatingToUint32) {
1660 double constants[] = {1, 3, 100, 1000, 100998348};
1661
1662 for (size_t i = 0; i < arraysize(constants); i++) {
1663 TestingGraph t(Type::Unsigned32());
1664 Node* k = t.jsgraph.Constant(constants[i]);
1665 Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
1666 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), mod);
1667 t.Return(trunc);
1668 t.Lower();
1669
1670 CHECK_EQ(IrOpcode::kUint32Mod, t.ret->InputAt(0)->InputAt(0)->opcode());
1671 }
1672 }
1673
1674
1675 TEST(NumberModulus_Int32) {
1676 int32_t constants[] = {-100, -10, 1, 4, 100, 1000};
1677
1678 for (size_t i = 0; i < arraysize(constants); i++) {
1679 TestingGraph t(Type::Signed32());
1680 Node* k = t.jsgraph.Constant(constants[i]);
1681 Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
1682 t.Return(mod);
1683 t.Lower();
1684
1685 CHECK_EQ(IrOpcode::kFloat64Mod, mod->opcode()); // Pesky -0 behavior.
1686 }
1687 }
1688
1689
1690 TEST(NumberModulus_Uint32) {
1691 const double kConstants[] = {2, 100, 1000, 1024, 2048};
1692 const MachineType kTypes[] = {MachineType::Int32(), MachineType::Uint32()};
1693
1694 for (auto const type : kTypes) {
1695 for (auto const c : kConstants) {
1696 TestingGraph t(Type::Unsigned32());
1697 Node* k = t.jsgraph.Constant(c);
1698 Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
1699 Node* use = t.Use(mod, type);
1700 t.Return(use);
1701 t.Lower();
1702
1703 CHECK_EQ(IrOpcode::kUint32Mod, use->InputAt(0)->opcode());
1704 }
1705 }
1706 }
1707
1708
1709 TEST(PhiRepresentation) {
1710 HandleAndZoneScope scope;
1711 Zone* z = scope.main_zone();
1712
1713 struct TestData {
1714 Type* arg1;
1715 Type* arg2;
1716 MachineType use;
1717 MachineRepresentation expected;
1718 };
1719
1720 TestData test_data[] = {
1721 {Type::Signed32(), Type::Unsigned32(), MachineType::Int32(),
1722 MachineRepresentation::kWord32},
1723 {Type::Signed32(), Type::Unsigned32(), MachineType::Uint32(),
1724 MachineRepresentation::kWord32},
1725 {Type::Signed32(), Type::Signed32(), MachineType::Int32(),
1726 MachineRepresentation::kWord32},
1727 {Type::Unsigned32(), Type::Unsigned32(), MachineType::Int32(),
1728 MachineRepresentation::kWord32},
1729 {Type::Number(), Type::Signed32(), MachineType::Int32(),
1730 MachineRepresentation::kWord32}};
1731
1732 for (auto const d : test_data) {
1733 TestingGraph t(d.arg1, d.arg2, Type::Boolean());
1734
1735 Node* br = t.graph()->NewNode(t.common()->Branch(), t.p2, t.start);
1736 Node* tb = t.graph()->NewNode(t.common()->IfTrue(), br);
1737 Node* fb = t.graph()->NewNode(t.common()->IfFalse(), br);
1738 Node* m = t.graph()->NewNode(t.common()->Merge(2), tb, fb);
1739
1740 Node* phi = t.graph()->NewNode(
1741 t.common()->Phi(MachineRepresentation::kTagged, 2), t.p0, t.p1, m);
1742
1743 Type* phi_type = Type::Union(d.arg1, d.arg2, z);
1744 NodeProperties::SetType(phi, phi_type);
1745
1746 Node* use = t.Use(phi, d.use);
1747 t.Return(use);
1748 t.Lower();
1749
1750 CHECK_EQ(d.expected, PhiRepresentationOf(phi->op()));
1751 }
1752 }
1753
1754 } // namespace compiler
1755 } // namespace internal
1756 } // namespace v8