OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/assembler.h" | 5 #include "src/assembler.h" |
6 #include "src/codegen.h" | 6 #include "src/codegen.h" |
7 #include "src/compiler/linkage.h" | 7 #include "src/compiler/linkage.h" |
8 #include "src/compiler/machine-type.h" | 8 #include "src/compiler/machine-type.h" |
9 #include "src/compiler/raw-machine-assembler.h" | 9 #include "src/compiler/raw-machine-assembler.h" |
10 | 10 |
11 #include "test/cctest/cctest.h" | 11 #include "test/cctest/cctest.h" |
12 #include "test/cctest/compiler/codegen-tester.h" | 12 #include "test/cctest/compiler/codegen-tester.h" |
13 #include "test/cctest/compiler/graph-builder-tester.h" | 13 #include "test/cctest/compiler/graph-builder-tester.h" |
14 #include "test/cctest/compiler/value-helper.h" | 14 #include "test/cctest/compiler/value-helper.h" |
15 | 15 |
16 using namespace v8::base; | 16 using namespace v8::base; |
17 using namespace v8::internal; | 17 using namespace v8::internal; |
18 using namespace v8::internal::compiler; | 18 using namespace v8::internal::compiler; |
19 | 19 |
20 typedef RawMachineAssembler::Label MLabel; | 20 typedef RawMachineAssembler::Label MLabel; |
21 | 21 |
22 #if !V8_TARGET_ARCH_ARM64 | 22 #if V8_TARGET_ARCH_ARM64 |
23 // TODO(titzer): fix native stack parameters on arm64 | 23 // TODO(titzer): fix native stack parameters on arm64 |
24 #define NATIVE_STACK_PARAMS_OK | 24 #define DISABLE_NATIVE_STACK_PARAMS true |
| 25 #else |
| 26 #define DISABLE_NATIVE_STACK_PARAMS false |
25 #endif | 27 #endif |
26 | 28 |
27 namespace { | 29 namespace { |
28 // Picks a representative set of registers from the allocatable set. | 30 typedef float float32; |
29 // If there are less than 100 possible pairs, do them all, otherwise try | 31 typedef double float64; |
| 32 |
| 33 // Picks a representative pair of integers from the given range. |
| 34 // If there are less than {max_pairs} possible pairs, do them all, otherwise try |
30 // to select a representative set. | 35 // to select a representative set. |
31 class RegisterPairs { | 36 class Pairs { |
32 public: | 37 public: |
33 RegisterPairs() | 38 Pairs(int max_pairs, int range) |
34 : max_(std::min(100, Register::kMaxNumAllocatableRegisters * | 39 : range_(range), |
35 Register::kMaxNumAllocatableRegisters)), | 40 max_pairs_(std::min(max_pairs, range_ * range_)), |
36 counter_(0) {} | 41 counter_(0) {} |
37 | 42 |
38 bool More() { return counter_ < max_; } | 43 bool More() { return counter_ < max_pairs_; } |
39 | 44 |
40 void Next(int* r0, int* r1, bool same_is_ok) { | 45 void Next(int* r0, int* r1, bool same_is_ok) { |
41 do { | 46 do { |
42 // Find the next pair. | 47 // Find the next pair. |
43 if (exhaustive()) { | 48 if (exhaustive()) { |
44 *r0 = counter_ % Register::kMaxNumAllocatableRegisters; | 49 *r0 = counter_ % range_; |
45 *r1 = counter_ / Register::kMaxNumAllocatableRegisters; | 50 *r1 = counter_ / range_; |
46 } else { | 51 } else { |
47 // Try each register at least once for both r0 and r1. | 52 // Try each integer at least once for both r0 and r1. |
48 int index = counter_ / 2; | 53 int index = counter_ / 2; |
49 if (counter_ & 1) { | 54 if (counter_ & 1) { |
50 *r0 = index % Register::kMaxNumAllocatableRegisters; | 55 *r0 = index % range_; |
51 *r1 = index / Register::kMaxNumAllocatableRegisters; | 56 *r1 = index / range_; |
52 } else { | 57 } else { |
53 *r1 = index % Register::kMaxNumAllocatableRegisters; | 58 *r1 = index % range_; |
54 *r0 = index / Register::kMaxNumAllocatableRegisters; | 59 *r0 = index / range_; |
55 } | 60 } |
56 } | 61 } |
57 counter_++; | 62 counter_++; |
58 if (same_is_ok) break; | 63 if (same_is_ok) break; |
59 if (*r0 == *r1) { | 64 if (*r0 == *r1) { |
60 if (counter_ >= max_) { | 65 if (counter_ >= max_pairs_) { |
61 // For the last hurrah, reg#0 with reg#n-1 | 66 // For the last hurrah, reg#0 with reg#n-1 |
62 *r0 = 0; | 67 *r0 = 0; |
63 *r1 = Register::kMaxNumAllocatableRegisters - 1; | 68 *r1 = range_ - 1; |
64 break; | 69 break; |
65 } | 70 } |
66 } | 71 } |
67 } while (true); | 72 } while (true); |
68 | 73 |
69 DCHECK(*r0 >= 0 && *r0 < Register::kMaxNumAllocatableRegisters); | 74 DCHECK(*r0 >= 0 && *r0 < range_); |
70 DCHECK(*r1 >= 0 && *r1 < Register::kMaxNumAllocatableRegisters); | 75 DCHECK(*r1 >= 0 && *r1 < range_); |
71 printf("pair = %d, %d\n", *r0, *r1); | |
72 } | 76 } |
73 | 77 |
74 private: | 78 private: |
75 int max_; | 79 int range_; |
| 80 int max_pairs_; |
76 int counter_; | 81 int counter_; |
77 bool exhaustive() { | 82 bool exhaustive() { return max_pairs_ == (range_ * range_); } |
78 return max_ == (Register::kMaxNumAllocatableRegisters * | |
79 Register::kMaxNumAllocatableRegisters); | |
80 } | |
81 }; | 83 }; |
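A minimal usage sketch of the Pairs interface above (TestWithPair is a hypothetical callback, not part of this patch): Next() either walks all range*range combinations when max_pairs covers them, or samples them so that every value in the range appears at least once on each side of a pair.

    Pairs pairs(100, 16);             // at most 100 pairs drawn from [0, 16)
    while (pairs.More()) {
      int r0, r1;
      pairs.Next(&r0, &r1, false);    // false: reject pairs where r0 == r1
      TestWithPair(r0, r1);           // hypothetical per-pair test body
    }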
82 | 84 |
83 | 85 |
| 86 // Pairs of general purpose registers. |
| 87 class RegisterPairs : public Pairs { |
| 88 public: |
| 89 RegisterPairs() : Pairs(100, Register::kMaxNumAllocatableRegisters) {} |
| 90 }; |
| 91 |
| 92 |
 | 93 // Pairs of float registers. |
| 94 class Float32RegisterPairs : public Pairs { |
| 95 public: |
| 96 Float32RegisterPairs() |
| 97 : Pairs(100, DoubleRegister::NumAllocatableAliasedRegisters()) {} |
| 98 }; |
| 99 |
| 100 |
| 101 // Pairs of double registers. |
| 102 class Float64RegisterPairs : public Pairs { |
| 103 public: |
| 104 Float64RegisterPairs() |
| 105 : Pairs(100, DoubleRegister::NumAllocatableAliasedRegisters()) {} |
| 106 }; |
| 107 |
| 108 |
84 // Helper for allocating either a GP or FP reg, or the next stack slot. | 109 // Helper for allocating either a GP or FP reg, or the next stack slot. |
85 struct Allocator { | 110 struct Allocator { |
86 Allocator(int* gp, int gpc, int* fp, int fpc) | 111 Allocator(int* gp, int gpc, int* fp, int fpc) |
87 : gp_count(gpc), | 112 : gp_count(gpc), |
88 gp_offset(0), | 113 gp_offset(0), |
89 gp_regs(gp), | 114 gp_regs(gp), |
90 fp_count(fpc), | 115 fp_count(fpc), |
91 fp_offset(0), | 116 fp_offset(0), |
92 fp_regs(fp), | 117 fp_regs(fp), |
93 stack_offset(0) {} | 118 stack_offset(0) {} |
(...skipping 164 matching lines...) |
258 Node* ret = b.graph()->NewNode(b.common()->Return(), call, call, start); | 283 Node* ret = b.graph()->NewNode(b.common()->Return(), call, call, start); |
259 b.graph()->SetEnd(ret); | 284 b.graph()->SetEnd(ret); |
260 } | 285 } |
261 | 286 |
262 CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, msig); | 287 CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, msig); |
263 | 288 |
264 return CompileGraph("wrapper", cdesc, caller.graph()); | 289 return CompileGraph("wrapper", cdesc, caller.graph()); |
265 } | 290 } |
266 | 291 |
267 | 292 |
| 293 template <typename CType> |
| 294 class ArgsBuffer { |
| 295 public: |
| 296 static const int kMaxParamCount = 64; |
| 297 |
| 298 explicit ArgsBuffer(int count, int seed = 1) : count_(count), seed_(seed) { |
| 299 // initialize the buffer with "seed 0" |
| 300 seed_ = 0; |
| 301 Mutate(); |
| 302 seed_ = seed; |
| 303 } |
| 304 |
| 305 class Sig : public MachineSignature { |
| 306 public: |
| 307 explicit Sig(int param_count) |
| 308 : MachineSignature(1, param_count, MachTypes()) { |
| 309 CHECK(param_count <= kMaxParamCount); |
| 310 } |
| 311 }; |
| 312 |
| 313 static MachineType* MachTypes() { |
| 314 MachineType t = MachineTypeForC<CType>(); |
| 315 static MachineType kTypes[kMaxParamCount + 1] = { |
| 316 t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, |
| 317 t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, |
| 318 t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t}; |
| 319 return kTypes; |
| 320 } |
| 321 |
| 322 Node* MakeConstant(RawMachineAssembler& raw, int32_t value) { |
| 323 return raw.Int32Constant(value); |
| 324 } |
| 325 |
| 326 Node* MakeConstant(RawMachineAssembler& raw, int64_t value) { |
| 327 return raw.Int64Constant(value); |
| 328 } |
| 329 |
| 330 Node* MakeConstant(RawMachineAssembler& raw, float32 value) { |
| 331 return raw.Float32Constant(value); |
| 332 } |
| 333 |
| 334 Node* MakeConstant(RawMachineAssembler& raw, float64 value) { |
| 335 return raw.Float64Constant(value); |
| 336 } |
| 337 |
| 338 Node* LoadInput(RawMachineAssembler& raw, Node* base, int index) { |
| 339 Node* offset = raw.Int32Constant(index * sizeof(CType)); |
| 340 return raw.Load(MachineTypeForC<CType>(), base, offset); |
| 341 } |
| 342 |
| 343 Node* StoreOutput(RawMachineAssembler& raw, Node* value) { |
| 344 Node* base = raw.PointerConstant(&output); |
| 345 Node* offset = raw.Int32Constant(0); |
| 346 return raw.Store(MachineTypeForC<CType>(), base, offset, value); |
| 347 } |
| 348 |
| 349 // Computes the next set of inputs by updating the {input} array. |
| 350 void Mutate(); |
| 351 |
| 352 void Reset() { memset(input, 0, sizeof(input)); } |
| 353 |
| 354 int count_; |
| 355 int seed_; |
| 356 CType input[kMaxParamCount]; |
| 357 CType output; |
| 358 }; |
| 359 |
| 360 |
| 361 template <> |
| 362 void ArgsBuffer<int32_t>::Mutate() { |
| 363 uint32_t base = 1111111111u * seed_; |
| 364 for (int j = 0; j < count_; j++) { |
| 365 input[j] = static_cast<int32_t>(256 + base + j + seed_ * 13); |
| 366 } |
| 367 output = -1; |
| 368 seed_++; |
| 369 } |
| 370 |
| 371 |
| 372 template <> |
| 373 void ArgsBuffer<int64_t>::Mutate() { |
| 374 uint64_t base = 11111111111111111ull * seed_; |
| 375 for (int j = 0; j < count_; j++) { |
| 376 input[j] = static_cast<int64_t>(256 + base + j + seed_ * 13); |
| 377 } |
| 378 output = -1; |
| 379 seed_++; |
| 380 } |
| 381 |
| 382 |
| 383 template <> |
| 384 void ArgsBuffer<float32>::Mutate() { |
| 385 float64 base = -33.25 * seed_; |
| 386 for (int j = 0; j < count_; j++) { |
| 387 input[j] = 256 + base + j + seed_ * 13; |
| 388 } |
| 389 output = std::numeric_limits<float32>::quiet_NaN(); |
| 390 seed_++; |
| 391 } |
| 392 |
| 393 |
| 394 template <> |
| 395 void ArgsBuffer<float64>::Mutate() { |
| 396 float64 base = -111.25 * seed_; |
| 397 for (int j = 0; j < count_; j++) { |
| 398 input[j] = 256 + base + j + seed_ * 13; |
| 399 } |
| 400 output = std::numeric_limits<float64>::quiet_NaN(); |
| 401 seed_++; |
| 402 } |
| 403 |
| 404 |
| 405 int ParamCount(CallDescriptor* desc) { |
| 406 return static_cast<int>(desc->GetMachineSignature()->parameter_count()); |
| 407 } |
| 408 |
| 409 |
| 410 template <typename CType> |
| 411 class Computer { |
| 412 public: |
| 413 static void Run(CallDescriptor* desc, |
| 414 void (*build)(CallDescriptor*, RawMachineAssembler&), |
| 415 CType (*compute)(CallDescriptor*, CType* inputs), |
| 416 int seed = 1) { |
| 417 int num_params = ParamCount(desc); |
| 418 CHECK_LE(num_params, kMaxParamCount); |
| 419 Isolate* isolate = CcTest::InitIsolateOnce(); |
| 420 HandleScope scope(isolate); |
| 421 Handle<Code> inner = Handle<Code>::null(); |
| 422 { |
| 423 // Build the graph for the computation. |
| 424 Zone zone; |
| 425 Graph graph(&zone); |
| 426 RawMachineAssembler raw(isolate, &graph, desc); |
| 427 build(desc, raw); |
| 428 inner = CompileGraph("Compute", desc, &graph, raw.Export()); |
| 429 } |
| 430 |
| 431 CSignature0<int32_t> csig; |
| 432 ArgsBuffer<CType> io(num_params, seed); |
| 433 |
| 434 { |
| 435 // constant mode. |
| 436 Handle<Code> wrapper = Handle<Code>::null(); |
| 437 { |
| 438 // Wrap the above code with a callable function that passes constants. |
| 439 Zone zone; |
| 440 Graph graph(&zone); |
| 441 CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig); |
| 442 RawMachineAssembler raw(isolate, &graph, cdesc); |
| 443 Unique<HeapObject> unique = |
| 444 Unique<HeapObject>::CreateUninitialized(inner); |
| 445 Node* target = raw.HeapConstant(unique); |
| 446 Node** args = zone.NewArray<Node*>(kMaxParamCount); |
| 447 for (int i = 0; i < num_params; i++) { |
| 448 args[i] = io.MakeConstant(raw, io.input[i]); |
| 449 } |
| 450 |
| 451 Node* call = raw.CallN(desc, target, args); |
| 452 Node* store = io.StoreOutput(raw, call); |
| 453 USE(store); |
| 454 raw.Return(raw.Int32Constant(seed)); |
| 455 wrapper = |
| 456 CompileGraph("Compute-wrapper-const", cdesc, &graph, raw.Export()); |
| 457 } |
| 458 |
| 459 CodeRunner<int32_t> runnable(isolate, wrapper, &csig); |
| 460 |
| 461 // Run the code, checking it against the reference. |
| 462 CType expected = compute(desc, io.input); |
| 463 int32_t check_seed = runnable.Call(); |
| 464 CHECK_EQ(seed, check_seed); |
| 465 CHECK_EQ(expected, io.output); |
| 466 } |
| 467 |
| 468 { |
| 469 // buffer mode. |
| 470 Handle<Code> wrapper = Handle<Code>::null(); |
| 471 { |
| 472 // Wrap the above code with a callable function that loads from {input}. |
| 473 Zone zone; |
| 474 Graph graph(&zone); |
| 475 CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig); |
| 476 RawMachineAssembler raw(isolate, &graph, cdesc); |
| 477 Node* base = raw.PointerConstant(io.input); |
| 478 Unique<HeapObject> unique = |
| 479 Unique<HeapObject>::CreateUninitialized(inner); |
| 480 Node* target = raw.HeapConstant(unique); |
| 481 Node** args = zone.NewArray<Node*>(kMaxParamCount); |
| 482 for (int i = 0; i < num_params; i++) { |
| 483 args[i] = io.LoadInput(raw, base, i); |
| 484 } |
| 485 |
| 486 Node* call = raw.CallN(desc, target, args); |
| 487 Node* store = io.StoreOutput(raw, call); |
| 488 USE(store); |
| 489 raw.Return(raw.Int32Constant(seed)); |
| 490 wrapper = CompileGraph("Compute-wrapper", cdesc, &graph, raw.Export()); |
| 491 } |
| 492 |
| 493 CodeRunner<int32_t> runnable(isolate, wrapper, &csig); |
| 494 |
| 495 // Run the code, checking it against the reference. |
| 496 for (int i = 0; i < 5; i++) { |
| 497 CType expected = compute(desc, io.input); |
| 498 int32_t check_seed = runnable.Call(); |
| 499 CHECK_EQ(seed, check_seed); |
| 500 CHECK_EQ(expected, io.output); |
| 501 io.Mutate(); |
| 502 } |
| 503 } |
| 504 } |
| 505 }; |
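For orientation, the tests later in this file drive this helper through the Run_Computation wrapper (defined below), for example:

    Run_Computation<int32_t>(desc, Build_Int32_WeightedSum,
                             Compute_Int32_WeightedSum, 257 + count);

Computer<CType>::Run compiles the graph produced by the build callback once, then calls it through two generated wrappers, one passing the current ArgsBuffer inputs as constants and one loading them from memory, and checks both results against the compute reference.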
| 506 |
268 } // namespace | 507 } // namespace |
269 | 508 |
270 | 509 |
271 static void TestInt32Sub(CallDescriptor* desc) { | 510 static void TestInt32Sub(CallDescriptor* desc) { |
272 Isolate* isolate = CcTest::InitIsolateOnce(); | 511 Isolate* isolate = CcTest::InitIsolateOnce(); |
273 HandleScope scope(isolate); | 512 HandleScope scope(isolate); |
274 Zone zone; | 513 Zone zone; |
275 GraphAndBuilders inner(&zone); | 514 GraphAndBuilders inner(&zone); |
276 { | 515 { |
277 // Build the subtract function. | 516 // Build the subtract function. |
(...skipping 18 matching lines...) |
296 FOR_INT32_INPUTS(j) { | 535 FOR_INT32_INPUTS(j) { |
297 int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(*i) - | 536 int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(*i) - |
298 static_cast<uint32_t>(*j)); | 537 static_cast<uint32_t>(*j)); |
299 int32_t result = runnable.Call(*i, *j); | 538 int32_t result = runnable.Call(*i, *j); |
300 CHECK_EQ(expected, result); | 539 CHECK_EQ(expected, result); |
301 } | 540 } |
302 } | 541 } |
303 } | 542 } |
304 | 543 |
305 | 544 |
306 #ifdef NATIVE_STACK_PARAMS_OK | |
307 static void CopyTwentyInt32(CallDescriptor* desc) { | 545 static void CopyTwentyInt32(CallDescriptor* desc) { |
| 546 if (DISABLE_NATIVE_STACK_PARAMS) return; |
| 547 |
308 const int kNumParams = 20; | 548 const int kNumParams = 20; |
309 int32_t input[kNumParams]; | 549 int32_t input[kNumParams]; |
310 int32_t output[kNumParams]; | 550 int32_t output[kNumParams]; |
311 Isolate* isolate = CcTest::InitIsolateOnce(); | 551 Isolate* isolate = CcTest::InitIsolateOnce(); |
312 HandleScope scope(isolate); | 552 HandleScope scope(isolate); |
313 Handle<Code> inner = Handle<Code>::null(); | 553 Handle<Code> inner = Handle<Code>::null(); |
314 { | 554 { |
315 // Writes all parameters into the output buffer. | 555 // Writes all parameters into the output buffer. |
316 Zone zone; | 556 Zone zone; |
317 Graph graph(&zone); | 557 Graph graph(&zone); |
(...skipping 40 matching lines...) |
358 } | 598 } |
359 | 599 |
360 memset(output, 0, sizeof(output)); | 600 memset(output, 0, sizeof(output)); |
361 CHECK_EQ(42, runnable.Call()); | 601 CHECK_EQ(42, runnable.Call()); |
362 | 602 |
363 for (int j = 0; j < kNumParams; j++) { | 603 for (int j = 0; j < kNumParams; j++) { |
364 CHECK_EQ(input[j], output[j]); | 604 CHECK_EQ(input[j], output[j]); |
365 } | 605 } |
366 } | 606 } |
367 } | 607 } |
368 #endif // NATIVE_STACK_PARAMS_OK | |
369 | 608 |
370 | 609 |
371 static void Test_RunInt32SubWithRet(int retreg) { | 610 static void Test_RunInt32SubWithRet(int retreg) { |
372 Int32Signature sig(2); | 611 Int32Signature sig(2); |
373 Zone zone; | 612 Zone zone; |
374 RegisterPairs pairs; | 613 RegisterPairs pairs; |
375 while (pairs.More()) { | 614 while (pairs.More()) { |
376 int parray[2]; | 615 int parray[2]; |
377 int rarray[] = {retreg}; | 616 int rarray[] = {retreg}; |
378 pairs.Next(&parray[0], &parray[1], false); | 617 pairs.Next(&parray[0], &parray[1], false); |
(...skipping 29 matching lines...) |
408 TEST_INT32_SUB_WITH_RET(13) | 647 TEST_INT32_SUB_WITH_RET(13) |
409 TEST_INT32_SUB_WITH_RET(14) | 648 TEST_INT32_SUB_WITH_RET(14) |
410 TEST_INT32_SUB_WITH_RET(15) | 649 TEST_INT32_SUB_WITH_RET(15) |
411 TEST_INT32_SUB_WITH_RET(16) | 650 TEST_INT32_SUB_WITH_RET(16) |
412 TEST_INT32_SUB_WITH_RET(17) | 651 TEST_INT32_SUB_WITH_RET(17) |
413 TEST_INT32_SUB_WITH_RET(18) | 652 TEST_INT32_SUB_WITH_RET(18) |
414 TEST_INT32_SUB_WITH_RET(19) | 653 TEST_INT32_SUB_WITH_RET(19) |
415 | 654 |
416 | 655 |
417 TEST(Run_Int32Sub_all_allocatable_single) { | 656 TEST(Run_Int32Sub_all_allocatable_single) { |
418 #ifdef NATIVE_STACK_PARAMS_OK | 657 if (DISABLE_NATIVE_STACK_PARAMS) return; |
419 Int32Signature sig(2); | 658 Int32Signature sig(2); |
420 RegisterPairs pairs; | 659 RegisterPairs pairs; |
421 while (pairs.More()) { | 660 while (pairs.More()) { |
422 Zone zone; | 661 Zone zone; |
423 int parray[1]; | 662 int parray[1]; |
424 int rarray[1]; | 663 int rarray[1]; |
425 pairs.Next(&rarray[0], &parray[0], true); | 664 pairs.Next(&rarray[0], &parray[0], true); |
426 Allocator params(parray, 1, nullptr, 0); | 665 Allocator params(parray, 1, nullptr, 0); |
427 Allocator rets(rarray, 1, nullptr, 0); | 666 Allocator rets(rarray, 1, nullptr, 0); |
428 RegisterConfig config(params, rets); | 667 RegisterConfig config(params, rets); |
429 CallDescriptor* desc = config.Create(&zone, &sig); | 668 CallDescriptor* desc = config.Create(&zone, &sig); |
430 TestInt32Sub(desc); | 669 TestInt32Sub(desc); |
431 } | 670 } |
432 #endif // NATIVE_STACK_PARAMS_OK | |
433 } | 671 } |
434 | 672 |
435 | 673 |
436 TEST(Run_CopyTwentyInt32_all_allocatable_pairs) { | 674 TEST(Run_CopyTwentyInt32_all_allocatable_pairs) { |
437 #ifdef NATIVE_STACK_PARAMS_OK | 675 if (DISABLE_NATIVE_STACK_PARAMS) return; |
438 Int32Signature sig(20); | 676 Int32Signature sig(20); |
439 RegisterPairs pairs; | 677 RegisterPairs pairs; |
440 while (pairs.More()) { | 678 while (pairs.More()) { |
441 Zone zone; | 679 Zone zone; |
442 int parray[2]; | 680 int parray[2]; |
443 int rarray[] = {0}; | 681 int rarray[] = {0}; |
444 pairs.Next(&parray[0], &parray[1], false); | 682 pairs.Next(&parray[0], &parray[1], false); |
445 Allocator params(parray, 2, nullptr, 0); | 683 Allocator params(parray, 2, nullptr, 0); |
446 Allocator rets(rarray, 1, nullptr, 0); | 684 Allocator rets(rarray, 1, nullptr, 0); |
447 RegisterConfig config(params, rets); | 685 RegisterConfig config(params, rets); |
448 CallDescriptor* desc = config.Create(&zone, &sig); | 686 CallDescriptor* desc = config.Create(&zone, &sig); |
449 CopyTwentyInt32(desc); | 687 CopyTwentyInt32(desc); |
450 } | 688 } |
451 #endif // NATIVE_STACK_PARAMS_OK | |
452 } | |
453 | |
454 | |
455 #ifdef NATIVE_STACK_PARAMS_OK | |
456 int ParamCount(CallDescriptor* desc) { | |
457 return static_cast<int>(desc->GetMachineSignature()->parameter_count()); | |
458 } | |
459 | |
460 | |
461 // Super mega helper routine to generate a computation with a given | |
462 // call descriptor, compile the code, wrap the code, and pass various inputs, | |
463 // comparing against a reference implementation. | |
464 static void Run_Int32_Computation( | |
465 CallDescriptor* desc, void (*build)(CallDescriptor*, RawMachineAssembler&), | |
466 int32_t (*compute)(CallDescriptor*, int32_t* inputs), int seed = 1) { | |
467 int num_params = ParamCount(desc); | |
468 CHECK_LE(num_params, kMaxParamCount); | |
469 int32_t input[kMaxParamCount]; | |
470 Isolate* isolate = CcTest::InitIsolateOnce(); | |
471 HandleScope scope(isolate); | |
472 Handle<Code> inner = Handle<Code>::null(); | |
473 { | |
474 // Build the graph for the computation. | |
475 Zone zone; | |
476 Graph graph(&zone); | |
477 RawMachineAssembler raw(isolate, &graph, desc); | |
478 build(desc, raw); | |
479 inner = CompileGraph("Compute", desc, &graph, raw.Export()); | |
480 } | |
481 | |
482 CSignature0<int32_t> csig; | |
483 | |
484 if (false) { | |
485 // constant mode. | |
486 Handle<Code> wrapper = Handle<Code>::null(); | |
487 { | |
488 // Wrap the above code with a callable function that passes constants. | |
489 Zone zone; | |
490 Graph graph(&zone); | |
491 CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig); | |
492 RawMachineAssembler raw(isolate, &graph, cdesc); | |
493 Unique<HeapObject> unique = | |
494 Unique<HeapObject>::CreateUninitialized(inner); | |
495 Node* target = raw.HeapConstant(unique); | |
496 Node** args = zone.NewArray<Node*>(kMaxParamCount); | |
497 for (int i = 0; i < num_params; i++) { | |
498 args[i] = raw.Int32Constant(0x100 + i); | |
499 } | |
500 | |
501 Node* call = raw.CallN(desc, target, args); | |
502 raw.Return(call); | |
503 wrapper = CompileGraph("Compute-wrapper", cdesc, &graph, raw.Export()); | |
504 } | |
505 | |
506 CodeRunner<int32_t> runnable(isolate, wrapper, &csig); | |
507 | |
508 // Run the code, checking it against the reference. | |
509 for (int j = 0; j < kMaxParamCount; j++) { | |
510 input[j] = 0x100 + j; | |
511 } | |
512 int32_t expected = compute(desc, input); | |
513 int32_t result = runnable.Call(); | |
514 | |
515 CHECK_EQ(expected, result); | |
516 } | |
517 | |
518 { | |
519 // buffer mode. | |
520 Handle<Code> wrapper = Handle<Code>::null(); | |
521 { | |
522 // Wrap the above code with a callable function that loads from {input}. | |
523 Zone zone; | |
524 Graph graph(&zone); | |
525 CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig); | |
526 RawMachineAssembler raw(isolate, &graph, cdesc); | |
527 Node* base = raw.PointerConstant(input); | |
528 Unique<HeapObject> unique = | |
529 Unique<HeapObject>::CreateUninitialized(inner); | |
530 Node* target = raw.HeapConstant(unique); | |
531 Node** args = zone.NewArray<Node*>(kMaxParamCount); | |
532 for (int i = 0; i < num_params; i++) { | |
533 Node* offset = raw.Int32Constant(i * sizeof(int32_t)); | |
534 args[i] = raw.Load(kMachInt32, base, offset); | |
535 } | |
536 | |
537 Node* call = raw.CallN(desc, target, args); | |
538 raw.Return(call); | |
539 wrapper = CompileGraph("Compute-wrapper", cdesc, &graph, raw.Export()); | |
540 } | |
541 | |
542 CodeRunner<int32_t> runnable(isolate, wrapper, &csig); | |
543 | |
544 // Run the code, checking it against the reference. | |
545 for (int i = 0; i < 5; i++) { | |
546 // Use pseudo-random values for each run, but the first run gets args | |
547 // 100, 101, 102, 103... for easier diagnosis. | |
548 uint32_t base = 1111111111u * i * seed; | |
549 for (int j = 0; j < kMaxParamCount; j++) { | |
550 input[j] = static_cast<int32_t>(100 + base + j); | |
551 } | |
552 int32_t expected = compute(desc, input); | |
553 int32_t result = runnable.Call(); | |
554 | |
555 CHECK_EQ(expected, result); | |
556 } | |
557 } | |
558 } | 689 } |
559 | 690 |
560 | 691 |
| 692 template <typename CType> |
| 693 static void Run_Computation( |
| 694 CallDescriptor* desc, void (*build)(CallDescriptor*, RawMachineAssembler&), |
| 695 CType (*compute)(CallDescriptor*, CType* inputs), int seed = 1) { |
| 696 Computer<CType>::Run(desc, build, compute, seed); |
| 697 } |
| 698 |
| 699 |
561 static uint32_t coeff[] = {1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, | 700 static uint32_t coeff[] = {1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, |
562 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, | 701 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, |
563 79, 83, 89, 97, 101, 103, 107, 109, 113}; | 702 79, 83, 89, 97, 101, 103, 107, 109, 113}; |
564 | 703 |
565 | 704 |
566 static void Build_Int32_WeightedSum(CallDescriptor* desc, | 705 static void Build_Int32_WeightedSum(CallDescriptor* desc, |
567 RawMachineAssembler& raw) { | 706 RawMachineAssembler& raw) { |
568 Node* result = raw.Int32Constant(0); | 707 Node* result = raw.Int32Constant(0); |
569 for (int i = 0; i < ParamCount(desc); i++) { | 708 for (int i = 0; i < ParamCount(desc); i++) { |
570 Node* term = raw.Int32Mul(raw.Parameter(i), raw.Int32Constant(coeff[i])); | 709 Node* term = raw.Int32Mul(raw.Parameter(i), raw.Int32Constant(coeff[i])); |
571 result = raw.Int32Add(result, term); | 710 result = raw.Int32Add(result, term); |
572 } | 711 } |
573 raw.Return(result); | 712 raw.Return(result); |
574 } | 713 } |
575 | 714 |
576 | 715 |
577 static int32_t Compute_Int32_WeightedSum(CallDescriptor* desc, int32_t* input) { | 716 static int32_t Compute_Int32_WeightedSum(CallDescriptor* desc, int32_t* input) { |
578 uint32_t result = 0; | 717 uint32_t result = 0; |
579 for (int i = 0; i < ParamCount(desc); i++) { | 718 for (int i = 0; i < ParamCount(desc); i++) { |
580 result += static_cast<uint32_t>(input[i]) * coeff[i]; | 719 result += static_cast<uint32_t>(input[i]) * coeff[i]; |
581 } | 720 } |
582 return static_cast<int32_t>(result); | 721 return static_cast<int32_t>(result); |
583 } | 722 } |
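A small worked example of the reference above: with three parameters and inputs {1, 2, 3}, the leading coefficients 1, 2, 3 give 1*1 + 2*2 + 3*3 = 14; the accumulation is done in uint32_t so any overflow wraps identically in the reference and in the generated code.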
584 | 723 |
585 | 724 |
586 static void Test_Int32_WeightedSum_of_size(int count) { | 725 static void Test_Int32_WeightedSum_of_size(int count) { |
| 726 if (DISABLE_NATIVE_STACK_PARAMS) return; |
587 Int32Signature sig(count); | 727 Int32Signature sig(count); |
588 for (int p0 = 0; p0 < Register::kMaxNumAllocatableRegisters; p0++) { | 728 for (int p0 = 0; p0 < Register::kMaxNumAllocatableRegisters; p0++) { |
589 Zone zone; | 729 Zone zone; |
590 | 730 |
591 int parray[] = {p0}; | 731 int parray[] = {p0}; |
592 int rarray[] = {0}; | 732 int rarray[] = {0}; |
593 Allocator params(parray, 1, nullptr, 0); | 733 Allocator params(parray, 1, nullptr, 0); |
594 Allocator rets(rarray, 1, nullptr, 0); | 734 Allocator rets(rarray, 1, nullptr, 0); |
595 RegisterConfig config(params, rets); | 735 RegisterConfig config(params, rets); |
596 CallDescriptor* desc = config.Create(&zone, &sig); | 736 CallDescriptor* desc = config.Create(&zone, &sig); |
597 Run_Int32_Computation(desc, Build_Int32_WeightedSum, | 737 Run_Computation<int32_t>(desc, Build_Int32_WeightedSum, |
598 Compute_Int32_WeightedSum, 257 + count); | 738 Compute_Int32_WeightedSum, 257 + count); |
599 } | 739 } |
600 } | 740 } |
601 | 741 |
602 | 742 |
603 // Separate tests for parallelization. | 743 // Separate tests for parallelization. |
604 #define TEST_INT32_WEIGHTEDSUM(x) \ | 744 #define TEST_INT32_WEIGHTEDSUM(x) \ |
605 TEST(Run_Int32_WeightedSum_##x) { Test_Int32_WeightedSum_of_size(x); } | 745 TEST(Run_Int32_WeightedSum_##x) { Test_Int32_WeightedSum_of_size(x); } |
606 | 746 |
607 | 747 |
608 TEST_INT32_WEIGHTEDSUM(1) | 748 TEST_INT32_WEIGHTEDSUM(1) |
609 TEST_INT32_WEIGHTEDSUM(2) | 749 TEST_INT32_WEIGHTEDSUM(2) |
610 TEST_INT32_WEIGHTEDSUM(3) | 750 TEST_INT32_WEIGHTEDSUM(3) |
611 TEST_INT32_WEIGHTEDSUM(4) | 751 TEST_INT32_WEIGHTEDSUM(4) |
612 TEST_INT32_WEIGHTEDSUM(5) | 752 TEST_INT32_WEIGHTEDSUM(5) |
613 TEST_INT32_WEIGHTEDSUM(7) | 753 TEST_INT32_WEIGHTEDSUM(7) |
614 TEST_INT32_WEIGHTEDSUM(9) | 754 TEST_INT32_WEIGHTEDSUM(9) |
615 TEST_INT32_WEIGHTEDSUM(11) | 755 TEST_INT32_WEIGHTEDSUM(11) |
616 TEST_INT32_WEIGHTEDSUM(17) | 756 TEST_INT32_WEIGHTEDSUM(17) |
617 TEST_INT32_WEIGHTEDSUM(19) | 757 TEST_INT32_WEIGHTEDSUM(19) |
618 | 758 |
619 | 759 |
620 template <int which> | 760 template <int which> |
621 static void Build_Int32_Select(CallDescriptor* desc, RawMachineAssembler& raw) { | 761 static void Build_Select(CallDescriptor* desc, RawMachineAssembler& raw) { |
622 raw.Return(raw.Parameter(which)); | 762 raw.Return(raw.Parameter(which)); |
623 } | 763 } |
624 | 764 |
625 | 765 |
626 template <int which> | 766 template <typename CType, int which> |
627 static int32_t Compute_Int32_Select(CallDescriptor* desc, int32_t* inputs) { | 767 static CType Compute_Select(CallDescriptor* desc, CType* inputs) { |
628 return inputs[which]; | 768 return inputs[which]; |
629 } | 769 } |
630 | 770 |
631 | 771 |
632 template <int which> | 772 template <int which> |
633 void Test_Int32_Select() { | 773 void Test_Int32_Select() { |
| 774 if (DISABLE_NATIVE_STACK_PARAMS) return; |
| 775 |
634 int parray[] = {0}; | 776 int parray[] = {0}; |
635 int rarray[] = {0}; | 777 int rarray[] = {0}; |
636 Allocator params(parray, 1, nullptr, 0); | 778 Allocator params(parray, 1, nullptr, 0); |
637 Allocator rets(rarray, 1, nullptr, 0); | 779 Allocator rets(rarray, 1, nullptr, 0); |
638 RegisterConfig config(params, rets); | 780 RegisterConfig config(params, rets); |
639 | 781 |
640 Zone zone; | 782 Zone zone; |
641 | 783 |
642 for (int i = which + 1; i <= 64; i++) { | 784 for (int i = which + 1; i <= 64; i++) { |
643 Int32Signature sig(i); | 785 Int32Signature sig(i); |
644 CallDescriptor* desc = config.Create(&zone, &sig); | 786 CallDescriptor* desc = config.Create(&zone, &sig); |
645 Run_Int32_Computation(desc, Build_Int32_Select<which>, | 787 Run_Computation<int32_t>(desc, Build_Select<which>, |
646 Compute_Int32_Select<which>, 1025 + which); | 788 Compute_Select<int32_t, which>, 1025 + which); |
647 } | 789 } |
648 } | 790 } |
649 | 791 |
650 | 792 |
651 // Separate tests for parallelization. | 793 // Separate tests for parallelization. |
652 #define TEST_INT32_SELECT(x) \ | 794 #define TEST_INT32_SELECT(x) \ |
653 TEST(Run_Int32_Select_##x) { Test_Int32_Select<x>(); } | 795 TEST(Run_Int32_Select_##x) { Test_Int32_Select<x>(); } |
654 | 796 |
655 | 797 |
656 TEST_INT32_SELECT(0) | 798 TEST_INT32_SELECT(0) |
657 TEST_INT32_SELECT(1) | 799 TEST_INT32_SELECT(1) |
658 TEST_INT32_SELECT(2) | 800 TEST_INT32_SELECT(2) |
659 TEST_INT32_SELECT(3) | 801 TEST_INT32_SELECT(3) |
660 TEST_INT32_SELECT(4) | 802 TEST_INT32_SELECT(4) |
661 TEST_INT32_SELECT(5) | 803 TEST_INT32_SELECT(5) |
662 TEST_INT32_SELECT(6) | 804 TEST_INT32_SELECT(6) |
663 TEST_INT32_SELECT(11) | 805 TEST_INT32_SELECT(11) |
664 TEST_INT32_SELECT(15) | 806 TEST_INT32_SELECT(15) |
665 TEST_INT32_SELECT(19) | 807 TEST_INT32_SELECT(19) |
666 TEST_INT32_SELECT(45) | 808 TEST_INT32_SELECT(45) |
667 TEST_INT32_SELECT(62) | 809 TEST_INT32_SELECT(62) |
668 TEST_INT32_SELECT(63) | 810 TEST_INT32_SELECT(63) |
669 #endif // NATIVE_STACK_PARAMS_OK | |
670 | 811 |
671 | 812 |
672 TEST(TheLastTestForLint) { | 813 TEST(Int64Select_registers) { |
673 // Yes, thank you. | 814 if (Register::kMaxNumAllocatableRegisters < 2) return; |
| 815 if (kPointerSize < 8) return; // TODO(titzer): int64 on 32-bit platforms |
| 816 |
| 817 int rarray[] = {0}; |
| 818 ArgsBuffer<int64_t>::Sig sig(2); |
| 819 |
| 820 RegisterPairs pairs; |
| 821 Zone zone; |
| 822 while (pairs.More()) { |
| 823 int parray[2]; |
| 824 pairs.Next(&parray[0], &parray[1], false); |
| 825 Allocator params(parray, 2, nullptr, 0); |
| 826 Allocator rets(rarray, 1, nullptr, 0); |
| 827 RegisterConfig config(params, rets); |
| 828 |
| 829 CallDescriptor* desc = config.Create(&zone, &sig); |
| 830 Run_Computation<int64_t>(desc, Build_Select<0>, Compute_Select<int64_t, 0>, |
| 831 1021); |
| 832 |
| 833 Run_Computation<int64_t>(desc, Build_Select<1>, Compute_Select<int64_t, 1>, |
| 834 1022); |
| 835 } |
674 } | 836 } |
| 837 |
| 838 |
| 839 TEST(Float32Select_registers) { |
| 840 if (DoubleRegister::kMaxNumAllocatableRegisters < 2) return; |
| 841 |
| 842 int rarray[] = {0}; |
| 843 ArgsBuffer<float32>::Sig sig(2); |
| 844 |
| 845 Float32RegisterPairs pairs; |
| 846 Zone zone; |
| 847 while (pairs.More()) { |
| 848 int parray[2]; |
| 849 pairs.Next(&parray[0], &parray[1], false); |
| 850 Allocator params(nullptr, 0, parray, 2); |
| 851 Allocator rets(nullptr, 0, rarray, 1); |
| 852 RegisterConfig config(params, rets); |
| 853 |
| 854 CallDescriptor* desc = config.Create(&zone, &sig); |
| 855 Run_Computation<float32>(desc, Build_Select<0>, Compute_Select<float32, 0>, |
| 856 1019); |
| 857 |
| 858 Run_Computation<float32>(desc, Build_Select<1>, Compute_Select<float32, 1>, |
| 859 1018); |
| 860 } |
| 861 } |
| 862 |
| 863 |
| 864 TEST(Float64Select_registers) { |
| 865 if (DoubleRegister::kMaxNumAllocatableRegisters < 2) return; |
| 866 |
| 867 int rarray[] = {0}; |
| 868 ArgsBuffer<float64>::Sig sig(2); |
| 869 |
| 870 Float64RegisterPairs pairs; |
| 871 Zone zone; |
| 872 while (pairs.More()) { |
| 873 int parray[2]; |
| 874 pairs.Next(&parray[0], &parray[1], false); |
| 875 Allocator params(nullptr, 0, parray, 2); |
| 876 Allocator rets(nullptr, 0, rarray, 1); |
| 877 RegisterConfig config(params, rets); |
| 878 |
| 879 CallDescriptor* desc = config.Create(&zone, &sig); |
| 880 Run_Computation<float64>(desc, Build_Select<0>, Compute_Select<float64, 0>, |
| 881 1033); |
| 882 |
| 883 Run_Computation<float64>(desc, Build_Select<1>, Compute_Select<float64, 1>, |
| 884 1034); |
| 885 } |
| 886 } |