Chromium Code Reviews

Side by Side Diff: test/cctest/compiler/test-run-native-calls.cc

Issue 1291553005: Add tests for float32/float64 parameters/returns passed in float32/float64 registers. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 4 months ago
1 // Copyright 2014 the V8 project authors. All rights reserved. 1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/assembler.h" 5 #include "src/assembler.h"
6 #include "src/codegen.h" 6 #include "src/codegen.h"
7 #include "src/compiler/linkage.h" 7 #include "src/compiler/linkage.h"
8 #include "src/compiler/machine-type.h" 8 #include "src/compiler/machine-type.h"
9 #include "src/compiler/raw-machine-assembler.h" 9 #include "src/compiler/raw-machine-assembler.h"
10 10
11 #include "test/cctest/cctest.h" 11 #include "test/cctest/cctest.h"
12 #include "test/cctest/compiler/codegen-tester.h" 12 #include "test/cctest/compiler/codegen-tester.h"
13 #include "test/cctest/compiler/graph-builder-tester.h" 13 #include "test/cctest/compiler/graph-builder-tester.h"
14 #include "test/cctest/compiler/value-helper.h" 14 #include "test/cctest/compiler/value-helper.h"
15 15
16 using namespace v8::base; 16 using namespace v8::base;
17 using namespace v8::internal; 17 using namespace v8::internal;
18 using namespace v8::internal::compiler; 18 using namespace v8::internal::compiler;
19 19
20 typedef RawMachineAssembler::Label MLabel; 20 typedef RawMachineAssembler::Label MLabel;
21 21
22 #if !V8_TARGET_ARCH_ARM64 22 #if V8_TARGET_ARCH_ARM64
23 // TODO(titzer): fix native stack parameters on arm64 23 // TODO(titzer): fix native stack parameters on arm64
24 #define NATIVE_STACK_PARAMS_OK 24 #define DISABLE_NATIVE_STACK_PARAMS true
25 #else
26 #define DISABLE_NATIVE_STACK_PARAMS false
25 #endif 27 #endif
26 28
27 namespace { 29 namespace {
28 // Picks a representative set of registers from the allocatable set. 30 typedef float float32;
29 // If there are less than 100 possible pairs, do them all, otherwise try 31 typedef double float64;
32
33 // Picks a representative set of integers from the gi
Michael Starzinger 2015/08/12 17:48:41 nit: comment looks incomplete.
34 // If there are less than {max_pairs} possible pairs, do them all, otherwise try
30 // to select a representative set. 35 // to select a representative set.
31 class RegisterPairs { 36 class Pairs {
32 public: 37 public:
33 RegisterPairs() 38 Pairs(int max_pairs, int range)
34 : max_(std::min(100, Register::kMaxNumAllocatableRegisters * 39 : range_(range),
35 Register::kMaxNumAllocatableRegisters)), 40 max_pairs_(std::min(max_pairs, range_ * range_)),
36 counter_(0) {} 41 counter_(0) {}
37 42
38 bool More() { return counter_ < max_; } 43 bool More() { return counter_ < max_pairs_; }
39 44
40 void Next(int* r0, int* r1, bool same_is_ok) { 45 void Next(int* r0, int* r1, bool same_is_ok) {
41 do { 46 do {
42 // Find the next pair. 47 // Find the next pair.
43 if (exhaustive()) { 48 if (exhaustive()) {
44 *r0 = counter_ % Register::kMaxNumAllocatableRegisters; 49 *r0 = counter_ % range_;
45 *r1 = counter_ / Register::kMaxNumAllocatableRegisters; 50 *r1 = counter_ / range_;
46 } else { 51 } else {
47 // Try each register at least once for both r0 and r1. 52 // Try each integer at least once for both r0 and r1.
48 int index = counter_ / 2; 53 int index = counter_ / 2;
49 if (counter_ & 1) { 54 if (counter_ & 1) {
50 *r0 = index % Register::kMaxNumAllocatableRegisters; 55 *r0 = index % range_;
51 *r1 = index / Register::kMaxNumAllocatableRegisters; 56 *r1 = index / range_;
52 } else { 57 } else {
53 *r1 = index % Register::kMaxNumAllocatableRegisters; 58 *r1 = index % range_;
54 *r0 = index / Register::kMaxNumAllocatableRegisters; 59 *r0 = index / range_;
55 } 60 }
56 } 61 }
57 counter_++; 62 counter_++;
58 if (same_is_ok) break; 63 if (same_is_ok) break;
59 if (*r0 == *r1) { 64 if (*r0 == *r1) {
60 if (counter_ >= max_) { 65 if (counter_ >= max_pairs_) {
61 // For the last hurrah, reg#0 with reg#n-1 66 // For the last hurrah, reg#0 with reg#n-1
62 *r0 = 0; 67 *r0 = 0;
63 *r1 = Register::kMaxNumAllocatableRegisters - 1; 68 *r1 = range_ - 1;
64 break; 69 break;
65 } 70 }
66 } 71 }
67 } while (true); 72 } while (true);
68 73
69 DCHECK(*r0 >= 0 && *r0 < Register::kMaxNumAllocatableRegisters); 74 DCHECK(*r0 >= 0 && *r0 < range_);
70 DCHECK(*r1 >= 0 && *r1 < Register::kMaxNumAllocatableRegisters); 75 DCHECK(*r1 >= 0 && *r1 < range_);
71 printf("pair = %d, %d\n", *r0, *r1); 76 // printf("pair = %d, %d\n", *r0, *r1);
Michael Starzinger 2015/08/12 17:48:41 nit: Looks like a leftover.
72 } 77 }
73 78
74 private: 79 private:
75 int max_; 80 int range_;
81 int max_pairs_;
76 int counter_; 82 int counter_;
77 bool exhaustive() { 83 bool exhaustive() { return max_pairs_ == (range_ * range_); }
78 return max_ == (Register::kMaxNumAllocatableRegisters *
79 Register::kMaxNumAllocatableRegisters);
80 }
81 }; 84 };
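A quick usage sketch of the pair enumeration above (illustration only; the range of 4 and the loop body are hypothetical, not part of the patch):

    // Enumerates the 4*4 = 16 pairs here (since max_pairs covers the whole
    // range); for larger ranges only a sampled subset is visited, in which
    // every value appears at least once as r0 and as r1.
    Pairs pairs(/*max_pairs=*/100, /*range=*/4);
    while (pairs.More()) {
      int r0, r1;
      pairs.Next(&r0, &r1, /*same_is_ok=*/false);  // skips pairs with r0 == r1
      // ... e.g. build a CallDescriptor that places parameters in r0 and r1 ...
    }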
82 85
83 86
87 // Pairs of general purpose registers.
88 class RegisterPairs : public Pairs {
89 public:
90 RegisterPairs() : Pairs(100, Register::kMaxNumAllocatableRegisters) {}
91 };
92
93
 94 // Pairs of float registers.
95 class Float32RegisterPairs : public Pairs {
96 public:
97 Float32RegisterPairs()
98 : Pairs(100, DoubleRegister::NumAllocatableAliasedRegisters()) {}
99 };
100
101
102 // Pairs of double registers.
103 class Float64RegisterPairs : public Pairs {
104 public:
105 Float64RegisterPairs()
106 : Pairs(100, DoubleRegister::NumAllocatableAliasedRegisters()) {}
107 };
108
109
 84 // Helper for allocating either a GP or FP reg, or the next stack slot. 110 // Helper for allocating either a GP or FP reg, or the next stack slot.
85 struct Allocator { 111 struct Allocator {
86 Allocator(int* gp, int gpc, int* fp, int fpc) 112 Allocator(int* gp, int gpc, int* fp, int fpc)
87 : gp_count(gpc), 113 : gp_count(gpc),
88 gp_offset(0), 114 gp_offset(0),
89 gp_regs(gp), 115 gp_regs(gp),
90 fp_count(fpc), 116 fp_count(fpc),
91 fp_offset(0), 117 fp_offset(0),
92 fp_regs(fp), 118 fp_regs(fp),
93 stack_offset(0) {} 119 stack_offset(0) {}
(...skipping 164 matching lines...)
258 Node* ret = b.graph()->NewNode(b.common()->Return(), call, call, start); 284 Node* ret = b.graph()->NewNode(b.common()->Return(), call, call, start);
259 b.graph()->SetEnd(ret); 285 b.graph()->SetEnd(ret);
260 } 286 }
261 287
262 CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, msig); 288 CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, msig);
263 289
264 return CompileGraph("wrapper", cdesc, caller.graph()); 290 return CompileGraph("wrapper", cdesc, caller.graph());
265 } 291 }
266 292
267 293
294 template <typename CType>
295 class ArgsBuffer {
296 public:
297 static const int kMaxParamCount = 64;
298
299 explicit ArgsBuffer(int count, int seed = 1) : count_(count), seed_(seed) {
300 // initialize the buffer with "seed 0"
301 seed_ = 0;
302 Mutate();
303 seed_ = seed;
304 }
305
306 class Sig : public MachineSignature {
307 public:
308 explicit Sig(int param_count)
309 : MachineSignature(1, param_count, MachTypes()) {
310 CHECK(param_count <= kMaxParamCount);
311 }
312 };
313
314 static MachineType* MachTypes() {
315 MachineType t = MachineTypeForC<CType>();
316 static MachineType kTypes[kMaxParamCount + 1] = {
317 t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t,
318 t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t,
319 t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t};
320 return kTypes;
321 }
322
323 Node* MakeConstant(RawMachineAssembler& raw, int32_t value) {
324 return raw.Int32Constant(value);
325 }
326
327 Node* MakeConstant(RawMachineAssembler& raw, int64_t value) {
328 return raw.Int64Constant(value);
329 }
330
331 Node* MakeConstant(RawMachineAssembler& raw, float32 value) {
332 return raw.Float32Constant(value);
333 }
334
335 Node* MakeConstant(RawMachineAssembler& raw, float64 value) {
336 return raw.Float64Constant(value);
337 }
338
339 Node* LoadInput(RawMachineAssembler& raw, Node* base, int index) {
340 Node* offset = raw.Int32Constant(index * sizeof(CType));
341 return raw.Load(MachineTypeForC<CType>(), base, offset);
342 }
343
344 Node* StoreOutput(RawMachineAssembler& raw, Node* value) {
345 Node* base = raw.PointerConstant(&output);
346 Node* offset = raw.Int32Constant(0);
347 return raw.Store(MachineTypeForC<CType>(), base, offset, value);
348 }
349
350 void Mutate();
351
352 void Reset() { memset(input, 0, sizeof(input)); }
353
354 int count_;
355 int seed_;
356 CType input[kMaxParamCount];
357 CType output;
358 };
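A minimal usage sketch of ArgsBuffer (hypothetical values; the wrapper built by Computer below is what actually writes {output}):

    // Holds {count_} inputs of CType plus one output slot; Mutate() refills
    // the inputs pseudo-randomly from the current seed (see the
    // specializations below).
    ArgsBuffer<float64> io(/*count=*/4, /*seed=*/1);
    io.Mutate();                  // new inputs derived from seed 1; seed_ -> 2
    float64 first = io.input[0];  // read back an input value
    io.Reset();                   // zero the input buffer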
359
360
361 template <>
362 void ArgsBuffer<int32_t>::Mutate() {
363 uint32_t base = 1111111111u * seed_;
364 for (int j = 0; j < count_; j++) {
365 input[j] = static_cast<int32_t>(256 + base + j + seed_ * 13);
366 }
367 output = -1;
368 seed_++;
369 }
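A worked example of the int32_t specialization above (my arithmetic, not from the patch):

    // With seed_ == 1 and count_ == 3:
    //   base     = 1111111111u * 1 = 1111111111
    //   input[j] = 256 + base + j + 1 * 13
    //   input    = {1111111380, 1111111381, 1111111382}
    //   output   = -1, and seed_ advances to 2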
370
371
372 template <>
373 void ArgsBuffer<int64_t>::Mutate() {
374 uint64_t base = 11111111111111111ull * seed_;
375 for (int j = 0; j < count_; j++) {
376 input[j] = static_cast<int64_t>(256 + base + j + seed_ * 13);
377 }
378 output = -1;
379 seed_++;
380 }
381
382
383 template <>
384 void ArgsBuffer<float32>::Mutate() {
385 float64 base = -33.25 * seed_;
386 for (int j = 0; j < count_; j++) {
387 input[j] = 256 + base + j + seed_ * 13;
388 }
389 output = std::numeric_limits<float32>::quiet_NaN();
390 seed_++;
391 }
392
393
394 template <>
395 void ArgsBuffer<float64>::Mutate() {
396 float64 base = -111.25 * seed_;
397 for (int j = 0; j < count_; j++) {
398 input[j] = 256 + base + j + seed_ * 13;
399 }
400 output = std::numeric_limits<float64>::quiet_NaN();
401 seed_++;
402 }
403
404
405 template <typename CType>
406 void ArgsBuffer<CType>::Mutate() {
Michael Starzinger 2015/08/12 17:48:41 Can we just skip the non-specialized implementation?
407 UNIMPLEMENTED();
408 }
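For reference, one way to act on the comment above (a sketch, not part of the patch): the primary definition could simply be dropped, since the in-class declaration plus the four explicit specializations suffices and any other CType would then fail at link time; alternatively, a dependent static_assert turns it into a compile-time error. {NoMutateSpecialization} below is a made-up helper name.

    // Sketch only: makes a missing specialization a compile-time error
    // instead of a runtime UNIMPLEMENTED().
    template <typename>
    struct NoMutateSpecialization { static const bool value = false; };

    template <typename CType>
    void ArgsBuffer<CType>::Mutate() {
      static_assert(NoMutateSpecialization<CType>::value,
                    "ArgsBuffer<CType>::Mutate() requires an explicit specialization");
    }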
409
410
411 int ParamCount(CallDescriptor* desc) {
412 return static_cast<int>(desc->GetMachineSignature()->parameter_count());
413 }
414
415
416 template <typename CType>
417 class Computer {
418 public:
419 static void Run(CallDescriptor* desc,
420 void (*build)(CallDescriptor*, RawMachineAssembler&),
421 CType (*compute)(CallDescriptor*, CType* inputs),
422 int seed = 1) {
423 int num_params = ParamCount(desc);
424 CHECK_LE(num_params, kMaxParamCount);
425 Isolate* isolate = CcTest::InitIsolateOnce();
426 HandleScope scope(isolate);
427 Handle<Code> inner = Handle<Code>::null();
428 {
429 // Build the graph for the computation.
430 Zone zone;
431 Graph graph(&zone);
432 RawMachineAssembler raw(isolate, &graph, desc);
433 build(desc, raw);
434 inner = CompileGraph("Compute", desc, &graph, raw.Export());
435 }
436
437 CSignature0<int32_t> csig;
438 ArgsBuffer<CType> io(num_params, seed);
439
440 {
441 // constant mode.
442 Handle<Code> wrapper = Handle<Code>::null();
443 {
444 // Wrap the above code with a callable function that passes constants.
445 Zone zone;
446 Graph graph(&zone);
447 CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
448 RawMachineAssembler raw(isolate, &graph, cdesc);
449 Unique<HeapObject> unique =
450 Unique<HeapObject>::CreateUninitialized(inner);
451 Node* target = raw.HeapConstant(unique);
452 Node** args = zone.NewArray<Node*>(kMaxParamCount);
453 for (int i = 0; i < num_params; i++) {
454 args[i] = io.MakeConstant(raw, io.input[i]);
455 }
456
457 Node* call = raw.CallN(desc, target, args);
458 Node* store = io.StoreOutput(raw, call);
459 USE(store);
460 raw.Return(raw.Int32Constant(seed));
461 wrapper =
462 CompileGraph("Compute-wrapper-const", cdesc, &graph, raw.Export());
463 }
464
465 CodeRunner<int32_t> runnable(isolate, wrapper, &csig);
466
467 // Run the code, checking it against the reference.
468 CType expected = compute(desc, io.input);
469 int32_t check_seed = runnable.Call();
470 CHECK_EQ(seed, check_seed);
471 CHECK_EQ(expected, io.output);
472 }
473
474 {
475 // buffer mode.
476 Handle<Code> wrapper = Handle<Code>::null();
477 {
478 // Wrap the above code with a callable function that loads from {input}.
479 Zone zone;
480 Graph graph(&zone);
481 CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
482 RawMachineAssembler raw(isolate, &graph, cdesc);
483 Node* base = raw.PointerConstant(io.input);
484 Unique<HeapObject> unique =
485 Unique<HeapObject>::CreateUninitialized(inner);
486 Node* target = raw.HeapConstant(unique);
487 Node** args = zone.NewArray<Node*>(kMaxParamCount);
488 for (int i = 0; i < num_params; i++) {
489 args[i] = io.LoadInput(raw, base, i);
490 }
491
492 Node* call = raw.CallN(desc, target, args);
493 Node* store = io.StoreOutput(raw, call);
494 USE(store);
495 raw.Return(raw.Int32Constant(seed));
496 wrapper = CompileGraph("Compute-wrapper", cdesc, &graph, raw.Export());
497 }
498
499 CodeRunner<int32_t> runnable(isolate, wrapper, &csig);
500
501 // Run the code, checking it against the reference.
502 for (int i = 0; i < 5; i++) {
503 CType expected = compute(desc, io.input);
504 int32_t check_seed = runnable.Call();
505 CHECK_EQ(seed, check_seed);
506 CHECK_EQ(expected, io.output);
507 io.Mutate();
508 }
509 }
510 }
511 };
512
268 } // namespace 513 } // namespace
269 514
270 515
271 static void TestInt32Sub(CallDescriptor* desc) { 516 static void TestInt32Sub(CallDescriptor* desc) {
272 Isolate* isolate = CcTest::InitIsolateOnce(); 517 Isolate* isolate = CcTest::InitIsolateOnce();
273 HandleScope scope(isolate); 518 HandleScope scope(isolate);
274 Zone zone; 519 Zone zone;
275 GraphAndBuilders inner(&zone); 520 GraphAndBuilders inner(&zone);
276 { 521 {
 277 // Build the subtract function. 522 // Build the subtract function.
(...skipping 18 matching lines...)
296 FOR_INT32_INPUTS(j) { 541 FOR_INT32_INPUTS(j) {
297 int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(*i) - 542 int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(*i) -
298 static_cast<uint32_t>(*j)); 543 static_cast<uint32_t>(*j));
299 int32_t result = runnable.Call(*i, *j); 544 int32_t result = runnable.Call(*i, *j);
300 CHECK_EQ(expected, result); 545 CHECK_EQ(expected, result);
301 } 546 }
302 } 547 }
303 } 548 }
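The unsigned casts in the expected-value computation above keep the subtraction well defined on overflow; a quick worked case (my numbers, not from the patch):

    // e.g. *i = 3, *j = 5:
    //   static_cast<uint32_t>(3) - static_cast<uint32_t>(5) = 4294967294u  (wraps mod 2^32)
    //   static_cast<int32_t>(4294967294u) = -2, matching the machine-level i - j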
304 549
305 550
306 #ifdef NATIVE_STACK_PARAMS_OK
307 static void CopyTwentyInt32(CallDescriptor* desc) { 551 static void CopyTwentyInt32(CallDescriptor* desc) {
552 if (DISABLE_NATIVE_STACK_PARAMS) return;
553
308 const int kNumParams = 20; 554 const int kNumParams = 20;
309 int32_t input[kNumParams]; 555 int32_t input[kNumParams];
310 int32_t output[kNumParams]; 556 int32_t output[kNumParams];
311 Isolate* isolate = CcTest::InitIsolateOnce(); 557 Isolate* isolate = CcTest::InitIsolateOnce();
312 HandleScope scope(isolate); 558 HandleScope scope(isolate);
313 Handle<Code> inner = Handle<Code>::null(); 559 Handle<Code> inner = Handle<Code>::null();
314 { 560 {
315 // Writes all parameters into the output buffer. 561 // Writes all parameters into the output buffer.
316 Zone zone; 562 Zone zone;
317 Graph graph(&zone); 563 Graph graph(&zone);
(...skipping 40 matching lines...)
358 } 604 }
359 605
360 memset(output, 0, sizeof(output)); 606 memset(output, 0, sizeof(output));
361 CHECK_EQ(42, runnable.Call()); 607 CHECK_EQ(42, runnable.Call());
362 608
363 for (int j = 0; j < kNumParams; j++) { 609 for (int j = 0; j < kNumParams; j++) {
364 CHECK_EQ(input[j], output[j]); 610 CHECK_EQ(input[j], output[j]);
365 } 611 }
366 } 612 }
367 } 613 }
368 #endif // NATIVE_STACK_PARAMS_OK
369 614
370 615
371 static void Test_RunInt32SubWithRet(int retreg) { 616 static void Test_RunInt32SubWithRet(int retreg) {
372 Int32Signature sig(2); 617 Int32Signature sig(2);
373 Zone zone; 618 Zone zone;
374 RegisterPairs pairs; 619 RegisterPairs pairs;
375 while (pairs.More()) { 620 while (pairs.More()) {
376 int parray[2]; 621 int parray[2];
377 int rarray[] = {retreg}; 622 int rarray[] = {retreg};
378 pairs.Next(&parray[0], &parray[1], false); 623 pairs.Next(&parray[0], &parray[1], false);
(...skipping 29 matching lines...)
408 TEST_INT32_SUB_WITH_RET(13) 653 TEST_INT32_SUB_WITH_RET(13)
409 TEST_INT32_SUB_WITH_RET(14) 654 TEST_INT32_SUB_WITH_RET(14)
410 TEST_INT32_SUB_WITH_RET(15) 655 TEST_INT32_SUB_WITH_RET(15)
411 TEST_INT32_SUB_WITH_RET(16) 656 TEST_INT32_SUB_WITH_RET(16)
412 TEST_INT32_SUB_WITH_RET(17) 657 TEST_INT32_SUB_WITH_RET(17)
413 TEST_INT32_SUB_WITH_RET(18) 658 TEST_INT32_SUB_WITH_RET(18)
414 TEST_INT32_SUB_WITH_RET(19) 659 TEST_INT32_SUB_WITH_RET(19)
415 660
416 661
417 TEST(Run_Int32Sub_all_allocatable_single) { 662 TEST(Run_Int32Sub_all_allocatable_single) {
418 #ifdef NATIVE_STACK_PARAMS_OK 663 if (DISABLE_NATIVE_STACK_PARAMS) return;
419 Int32Signature sig(2); 664 Int32Signature sig(2);
420 RegisterPairs pairs; 665 RegisterPairs pairs;
421 while (pairs.More()) { 666 while (pairs.More()) {
422 Zone zone; 667 Zone zone;
423 int parray[1]; 668 int parray[1];
424 int rarray[1]; 669 int rarray[1];
425 pairs.Next(&rarray[0], &parray[0], true); 670 pairs.Next(&rarray[0], &parray[0], true);
426 Allocator params(parray, 1, nullptr, 0); 671 Allocator params(parray, 1, nullptr, 0);
427 Allocator rets(rarray, 1, nullptr, 0); 672 Allocator rets(rarray, 1, nullptr, 0);
428 RegisterConfig config(params, rets); 673 RegisterConfig config(params, rets);
429 CallDescriptor* desc = config.Create(&zone, &sig); 674 CallDescriptor* desc = config.Create(&zone, &sig);
430 TestInt32Sub(desc); 675 TestInt32Sub(desc);
431 } 676 }
432 #endif // NATIVE_STACK_PARAMS_OK
433 } 677 }
434 678
435 679
436 TEST(Run_CopyTwentyInt32_all_allocatable_pairs) { 680 TEST(Run_CopyTwentyInt32_all_allocatable_pairs) {
437 #ifdef NATIVE_STACK_PARAMS_OK 681 if (DISABLE_NATIVE_STACK_PARAMS) return;
438 Int32Signature sig(20); 682 Int32Signature sig(20);
439 RegisterPairs pairs; 683 RegisterPairs pairs;
440 while (pairs.More()) { 684 while (pairs.More()) {
441 Zone zone; 685 Zone zone;
442 int parray[2]; 686 int parray[2];
443 int rarray[] = {0}; 687 int rarray[] = {0};
444 pairs.Next(&parray[0], &parray[1], false); 688 pairs.Next(&parray[0], &parray[1], false);
445 Allocator params(parray, 2, nullptr, 0); 689 Allocator params(parray, 2, nullptr, 0);
446 Allocator rets(rarray, 1, nullptr, 0); 690 Allocator rets(rarray, 1, nullptr, 0);
447 RegisterConfig config(params, rets); 691 RegisterConfig config(params, rets);
448 CallDescriptor* desc = config.Create(&zone, &sig); 692 CallDescriptor* desc = config.Create(&zone, &sig);
449 CopyTwentyInt32(desc); 693 CopyTwentyInt32(desc);
450 } 694 }
451 #endif // NATIVE_STACK_PARAMS_OK
452 }
453
454
455 #ifdef NATIVE_STACK_PARAMS_OK
456 int ParamCount(CallDescriptor* desc) {
457 return static_cast<int>(desc->GetMachineSignature()->parameter_count());
458 }
459
460
461 // Super mega helper routine to generate a computation with a given
462 // call descriptor, compile the code, wrap the code, and pass various inputs,
463 // comparing against a reference implementation.
464 static void Run_Int32_Computation(
465 CallDescriptor* desc, void (*build)(CallDescriptor*, RawMachineAssembler&),
466 int32_t (*compute)(CallDescriptor*, int32_t* inputs), int seed = 1) {
467 int num_params = ParamCount(desc);
468 CHECK_LE(num_params, kMaxParamCount);
469 int32_t input[kMaxParamCount];
470 Isolate* isolate = CcTest::InitIsolateOnce();
471 HandleScope scope(isolate);
472 Handle<Code> inner = Handle<Code>::null();
473 {
474 // Build the graph for the computation.
475 Zone zone;
476 Graph graph(&zone);
477 RawMachineAssembler raw(isolate, &graph, desc);
478 build(desc, raw);
479 inner = CompileGraph("Compute", desc, &graph, raw.Export());
480 }
481
482 CSignature0<int32_t> csig;
483
484 if (false) {
485 // constant mode.
486 Handle<Code> wrapper = Handle<Code>::null();
487 {
488 // Wrap the above code with a callable function that passes constants.
489 Zone zone;
490 Graph graph(&zone);
491 CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
492 RawMachineAssembler raw(isolate, &graph, cdesc);
493 Unique<HeapObject> unique =
494 Unique<HeapObject>::CreateUninitialized(inner);
495 Node* target = raw.HeapConstant(unique);
496 Node** args = zone.NewArray<Node*>(kMaxParamCount);
497 for (int i = 0; i < num_params; i++) {
498 args[i] = raw.Int32Constant(0x100 + i);
499 }
500
501 Node* call = raw.CallN(desc, target, args);
502 raw.Return(call);
503 wrapper = CompileGraph("Compute-wrapper", cdesc, &graph, raw.Export());
504 }
505
506 CodeRunner<int32_t> runnable(isolate, wrapper, &csig);
507
508 // Run the code, checking it against the reference.
509 for (int j = 0; j < kMaxParamCount; j++) {
510 input[j] = 0x100 + j;
511 }
512 int32_t expected = compute(desc, input);
513 int32_t result = runnable.Call();
514
515 CHECK_EQ(expected, result);
516 }
517
518 {
519 // buffer mode.
520 Handle<Code> wrapper = Handle<Code>::null();
521 {
522 // Wrap the above code with a callable function that loads from {input}.
523 Zone zone;
524 Graph graph(&zone);
525 CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
526 RawMachineAssembler raw(isolate, &graph, cdesc);
527 Node* base = raw.PointerConstant(input);
528 Unique<HeapObject> unique =
529 Unique<HeapObject>::CreateUninitialized(inner);
530 Node* target = raw.HeapConstant(unique);
531 Node** args = zone.NewArray<Node*>(kMaxParamCount);
532 for (int i = 0; i < num_params; i++) {
533 Node* offset = raw.Int32Constant(i * sizeof(int32_t));
534 args[i] = raw.Load(kMachInt32, base, offset);
535 }
536
537 Node* call = raw.CallN(desc, target, args);
538 raw.Return(call);
539 wrapper = CompileGraph("Compute-wrapper", cdesc, &graph, raw.Export());
540 }
541
542 CodeRunner<int32_t> runnable(isolate, wrapper, &csig);
543
544 // Run the code, checking it against the reference.
545 for (int i = 0; i < 5; i++) {
546 // Use pseudo-random values for each run, but the first run gets args
547 // 100, 101, 102, 103... for easier diagnosis.
548 uint32_t base = 1111111111u * i * seed;
549 for (int j = 0; j < kMaxParamCount; j++) {
550 input[j] = static_cast<int32_t>(100 + base + j);
551 }
552 int32_t expected = compute(desc, input);
553 int32_t result = runnable.Call();
554
555 CHECK_EQ(expected, result);
556 }
557 }
558 } 695 }
559 696
560 697
698 template <typename CType>
699 static void Run_Computation(
700 CallDescriptor* desc, void (*build)(CallDescriptor*, RawMachineAssembler&),
701 CType (*compute)(CallDescriptor*, CType* inputs), int seed = 1) {
702 Computer<CType>::Run(desc, build, compute, seed);
703 }
704
705
561 static uint32_t coeff[] = {1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 706 static uint32_t coeff[] = {1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
562 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 707 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73,
563 79, 83, 89, 97, 101, 103, 107, 109, 113}; 708 79, 83, 89, 97, 101, 103, 107, 109, 113};
564 709
565 710
566 static void Build_Int32_WeightedSum(CallDescriptor* desc, 711 static void Build_Int32_WeightedSum(CallDescriptor* desc,
567 RawMachineAssembler& raw) { 712 RawMachineAssembler& raw) {
568 Node* result = raw.Int32Constant(0); 713 Node* result = raw.Int32Constant(0);
569 for (int i = 0; i < ParamCount(desc); i++) { 714 for (int i = 0; i < ParamCount(desc); i++) {
570 Node* term = raw.Int32Mul(raw.Parameter(i), raw.Int32Constant(coeff[i])); 715 Node* term = raw.Int32Mul(raw.Parameter(i), raw.Int32Constant(coeff[i]));
571 result = raw.Int32Add(result, term); 716 result = raw.Int32Add(result, term);
572 } 717 }
573 raw.Return(result); 718 raw.Return(result);
574 } 719 }
575 720
576 721
577 static int32_t Compute_Int32_WeightedSum(CallDescriptor* desc, int32_t* input) { 722 static int32_t Compute_Int32_WeightedSum(CallDescriptor* desc, int32_t* input) {
578 uint32_t result = 0; 723 uint32_t result = 0;
579 for (int i = 0; i < ParamCount(desc); i++) { 724 for (int i = 0; i < ParamCount(desc); i++) {
580 result += static_cast<uint32_t>(input[i]) * coeff[i]; 725 result += static_cast<uint32_t>(input[i]) * coeff[i];
581 } 726 }
582 return static_cast<int32_t>(result); 727 return static_cast<int32_t>(result);
583 } 728 }
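A quick sanity check of the reference function above (my arithmetic, not from the patch): with three parameters only the first three coefficients {1, 2, 3} apply, so

    // input = {1, 2, 3}:
    //   result = 1*1 + 2*2 + 3*3 = 14
    // input = {10, 20, 30}:
    //   result = 1*10 + 2*20 + 3*30 = 140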
584 729
585 730
586 static void Test_Int32_WeightedSum_of_size(int count) { 731 static void Test_Int32_WeightedSum_of_size(int count) {
732 if (DISABLE_NATIVE_STACK_PARAMS) return;
587 Int32Signature sig(count); 733 Int32Signature sig(count);
588 for (int p0 = 0; p0 < Register::kMaxNumAllocatableRegisters; p0++) { 734 for (int p0 = 0; p0 < Register::kMaxNumAllocatableRegisters; p0++) {
589 Zone zone; 735 Zone zone;
590 736
591 int parray[] = {p0}; 737 int parray[] = {p0};
592 int rarray[] = {0}; 738 int rarray[] = {0};
593 Allocator params(parray, 1, nullptr, 0); 739 Allocator params(parray, 1, nullptr, 0);
594 Allocator rets(rarray, 1, nullptr, 0); 740 Allocator rets(rarray, 1, nullptr, 0);
595 RegisterConfig config(params, rets); 741 RegisterConfig config(params, rets);
596 CallDescriptor* desc = config.Create(&zone, &sig); 742 CallDescriptor* desc = config.Create(&zone, &sig);
597 Run_Int32_Computation(desc, Build_Int32_WeightedSum, 743 Run_Computation<int32_t>(desc, Build_Int32_WeightedSum,
598 Compute_Int32_WeightedSum, 257 + count); 744 Compute_Int32_WeightedSum, 257 + count);
599 } 745 }
600 } 746 }
601 747
602 748
603 // Separate tests for parallelization. 749 // Separate tests for parallelization.
604 #define TEST_INT32_WEIGHTEDSUM(x) \ 750 #define TEST_INT32_WEIGHTEDSUM(x) \
605 TEST(Run_Int32_WeightedSum_##x) { Test_Int32_WeightedSum_of_size(x); } 751 TEST(Run_Int32_WeightedSum_##x) { Test_Int32_WeightedSum_of_size(x); }
606 752
607 753
608 TEST_INT32_WEIGHTEDSUM(1) 754 TEST_INT32_WEIGHTEDSUM(1)
609 TEST_INT32_WEIGHTEDSUM(2) 755 TEST_INT32_WEIGHTEDSUM(2)
610 TEST_INT32_WEIGHTEDSUM(3) 756 TEST_INT32_WEIGHTEDSUM(3)
611 TEST_INT32_WEIGHTEDSUM(4) 757 TEST_INT32_WEIGHTEDSUM(4)
612 TEST_INT32_WEIGHTEDSUM(5) 758 TEST_INT32_WEIGHTEDSUM(5)
613 TEST_INT32_WEIGHTEDSUM(7) 759 TEST_INT32_WEIGHTEDSUM(7)
614 TEST_INT32_WEIGHTEDSUM(9) 760 TEST_INT32_WEIGHTEDSUM(9)
615 TEST_INT32_WEIGHTEDSUM(11) 761 TEST_INT32_WEIGHTEDSUM(11)
616 TEST_INT32_WEIGHTEDSUM(17) 762 TEST_INT32_WEIGHTEDSUM(17)
617 TEST_INT32_WEIGHTEDSUM(19) 763 TEST_INT32_WEIGHTEDSUM(19)
618 764
619 765
620 template <int which> 766 template <int which>
621 static void Build_Int32_Select(CallDescriptor* desc, RawMachineAssembler& raw) { 767 static void Build_Select(CallDescriptor* desc, RawMachineAssembler& raw) {
622 raw.Return(raw.Parameter(which)); 768 raw.Return(raw.Parameter(which));
623 } 769 }
624 770
625 771
626 template <int which> 772 template <typename CType, int which>
627 static int32_t Compute_Int32_Select(CallDescriptor* desc, int32_t* inputs) { 773 static CType Compute_Select(CallDescriptor* desc, CType* inputs) {
628 return inputs[which]; 774 return inputs[which];
629 } 775 }
630 776
631 777
632 template <int which> 778 template <int which>
633 void Test_Int32_Select() { 779 void Test_Int32_Select() {
780 if (DISABLE_NATIVE_STACK_PARAMS) return;
781
634 int parray[] = {0}; 782 int parray[] = {0};
635 int rarray[] = {0}; 783 int rarray[] = {0};
636 Allocator params(parray, 1, nullptr, 0); 784 Allocator params(parray, 1, nullptr, 0);
637 Allocator rets(rarray, 1, nullptr, 0); 785 Allocator rets(rarray, 1, nullptr, 0);
638 RegisterConfig config(params, rets); 786 RegisterConfig config(params, rets);
639 787
640 Zone zone; 788 Zone zone;
641 789
642 for (int i = which + 1; i <= 64; i++) { 790 for (int i = which + 1; i <= 64; i++) {
643 Int32Signature sig(i); 791 Int32Signature sig(i);
644 CallDescriptor* desc = config.Create(&zone, &sig); 792 CallDescriptor* desc = config.Create(&zone, &sig);
645 Run_Int32_Computation(desc, Build_Int32_Select<which>, 793 Run_Computation<int32_t>(desc, Build_Select<which>,
646 Compute_Int32_Select<which>, 1025 + which); 794 Compute_Select<int32_t, which>, 1025 + which);
647 } 795 }
648 } 796 }
649 797
650 798
651 // Separate tests for parallelization. 799 // Separate tests for parallelization.
652 #define TEST_INT32_SELECT(x) \ 800 #define TEST_INT32_SELECT(x) \
653 TEST(Run_Int32_Select_##x) { Test_Int32_Select<x>(); } 801 TEST(Run_Int32_Select_##x) { Test_Int32_Select<x>(); }
654 802
655 803
656 TEST_INT32_SELECT(0) 804 TEST_INT32_SELECT(0)
657 TEST_INT32_SELECT(1) 805 TEST_INT32_SELECT(1)
658 TEST_INT32_SELECT(2) 806 TEST_INT32_SELECT(2)
659 TEST_INT32_SELECT(3) 807 TEST_INT32_SELECT(3)
660 TEST_INT32_SELECT(4) 808 TEST_INT32_SELECT(4)
661 TEST_INT32_SELECT(5) 809 TEST_INT32_SELECT(5)
662 TEST_INT32_SELECT(6) 810 TEST_INT32_SELECT(6)
663 TEST_INT32_SELECT(11) 811 TEST_INT32_SELECT(11)
664 TEST_INT32_SELECT(15) 812 TEST_INT32_SELECT(15)
665 TEST_INT32_SELECT(19) 813 TEST_INT32_SELECT(19)
666 TEST_INT32_SELECT(45) 814 TEST_INT32_SELECT(45)
667 TEST_INT32_SELECT(62) 815 TEST_INT32_SELECT(62)
668 TEST_INT32_SELECT(63) 816 TEST_INT32_SELECT(63)
669 #endif // NATIVE_STACK_PARAMS_OK
670 817
671 818
672 TEST(TheLastTestForLint) { 819 TEST(Int64Select_registers) {
673 // Yes, thank you. 820 if (Register::kMaxNumAllocatableRegisters < 2) return;
821 if (kPointerSize < 8) return; // TODO(titzer): int64 on 32-bit platforms
822
823 int rarray[] = {0};
824 ArgsBuffer<int64_t>::Sig sig(2);
825
826 RegisterPairs pairs;
827 Zone zone;
828 while (pairs.More()) {
829 int parray[2];
830 pairs.Next(&parray[0], &parray[1], false);
831 Allocator params(parray, 2, nullptr, 0);
832 Allocator rets(rarray, 1, nullptr, 0);
833 RegisterConfig config(params, rets);
834
835 CallDescriptor* desc = config.Create(&zone, &sig);
836 Run_Computation<int64_t>(desc, Build_Select<0>, Compute_Select<int64_t, 0>,
837 1021);
838
839 Run_Computation<int64_t>(desc, Build_Select<1>, Compute_Select<int64_t, 1>,
840 1022);
841 }
674 } 842 }
843
844
845 TEST(Float32Select_registers) {
846 if (DoubleRegister::kMaxNumAllocatableRegisters < 2) return;
847
848 int rarray[] = {0};
849 ArgsBuffer<float32>::Sig sig(2);
850
851 Float32RegisterPairs pairs;
852 Zone zone;
853 while (pairs.More()) {
854 int parray[2];
855 pairs.Next(&parray[0], &parray[1], false);
856 Allocator params(nullptr, 0, parray, 2);
857 Allocator rets(nullptr, 0, rarray, 1);
858 RegisterConfig config(params, rets);
859
860 CallDescriptor* desc = config.Create(&zone, &sig);
861 Run_Computation<float32>(desc, Build_Select<0>, Compute_Select<float32, 0>,
862 1019);
863
864 Run_Computation<float32>(desc, Build_Select<1>, Compute_Select<float32, 1>,
865 1018);
866 }
867 }
868
869
870 TEST(Float64Select_registers) {
871 if (DoubleRegister::kMaxNumAllocatableRegisters < 2) return;
872
873 int rarray[] = {0};
874 ArgsBuffer<float64>::Sig sig(2);
875
876 Float64RegisterPairs pairs;
877 Zone zone;
878 while (pairs.More()) {
879 int parray[2];
880 pairs.Next(&parray[0], &parray[1], false);
881 Allocator params(nullptr, 0, parray, 2);
882 Allocator rets(nullptr, 0, rarray, 1);
883 RegisterConfig config(params, rets);
884
885 CallDescriptor* desc = config.Create(&zone, &sig);
886 Run_Computation<float64>(desc, Build_Select<0>, Compute_Select<float64, 0>,
887 1033);
888
889 Run_Computation<float64>(desc, Build_Select<1>, Compute_Select<float64, 1>,
890 1034);
891 }
892 }
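In plain C++ terms, the inner functions generated for these register-pair tests behave like the following (analogy only; the real bodies are built as compiler graphs via Build_Select above):

    // Build_Select<0> / Build_Select<1> for the float64 case:
    double Select0(double a, double b) { return a; }
    double Select1(double a, double b) { return b; }
    // Each RegisterConfig varies which FP registers carry a, b and the return
    // value; the wrapper then checks that the selected argument comes back intact.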