| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. Use of this | 1 // Copyright 2014 the V8 project authors. All rights reserved. Use of this |
| 2 // source code is governed by a BSD-style license that can be found in the | 2 // source code is governed by a BSD-style license that can be found in the |
| 3 // LICENSE file. | 3 // LICENSE file. |
| 4 | 4 |
| 5 // TODO(jochen): Remove this after the setting is turned on globally. | 5 // TODO(jochen): Remove this after the setting is turned on globally. |
| 6 #define V8_IMMINENT_DEPRECATION_WARNINGS | 6 #define V8_IMMINENT_DEPRECATION_WARNINGS |
| 7 | 7 |
| 8 #include <cmath> | 8 #include <cmath> |
| 9 #include <functional> | 9 #include <functional> |
| 10 #include <limits> | 10 #include <limits> |
| (...skipping 3219 matching lines...) |
| 3230 RunLoadStore<int16_t>(kMachInt16); | 3230 RunLoadStore<int16_t>(kMachInt16); |
| 3231 RunLoadStore<uint16_t>(kMachUint16); | 3231 RunLoadStore<uint16_t>(kMachUint16); |
| 3232 RunLoadStore<int32_t>(kMachInt32); | 3232 RunLoadStore<int32_t>(kMachInt32); |
| 3233 RunLoadStore<uint32_t>(kMachUint32); | 3233 RunLoadStore<uint32_t>(kMachUint32); |
| 3234 RunLoadStore<void*>(kMachAnyTagged); | 3234 RunLoadStore<void*>(kMachAnyTagged); |
| 3235 RunLoadStore<float>(kMachFloat32); | 3235 RunLoadStore<float>(kMachFloat32); |
| 3236 RunLoadStore<double>(kMachFloat64); | 3236 RunLoadStore<double>(kMachFloat64); |
| 3237 } | 3237 } |
| 3238 | 3238 |
| 3239 | 3239 |
| 3240 TEST(RunFloat32Binop) { | 3240 TEST(RunFloat32Add) { |
| 3241 RawMachineAssemblerTester<int32_t> m; | 3241 BufferedRawMachineAssemblerTester<float> m(kMachFloat32, kMachFloat32); |
| 3242 float result; | 3242 m.Return(m.Float32Add(m.Parameter(0), m.Parameter(1))); |
| 3243 | 3243 |
| 3244 const Operator* ops[] = {m.machine()->Float32Add(), m.machine()->Float32Sub(), | 3244 FOR_FLOAT32_INPUTS(i) { |
| 3245 m.machine()->Float32Mul(), m.machine()->Float32Div(), | 3245 FOR_FLOAT32_INPUTS(j) { CheckFloatEq(*i + *j, m.Call(*i, *j)); } |
| 3246 NULL}; | |
| 3247 | |
| 3248 float inf = std::numeric_limits<float>::infinity(); | |
| 3249 const Operator* inputs[] = { | |
| 3250 m.common()->Float32Constant(0.0f), m.common()->Float32Constant(1.0f), | |
| 3251 m.common()->Float32Constant(1.0f), m.common()->Float32Constant(0.0f), | |
| 3252 m.common()->Float32Constant(0.0f), m.common()->Float32Constant(-1.0f), | |
| 3253 m.common()->Float32Constant(-1.0f), m.common()->Float32Constant(0.0f), | |
| 3254 m.common()->Float32Constant(0.22f), m.common()->Float32Constant(-1.22f), | |
| 3255 m.common()->Float32Constant(-1.22f), m.common()->Float32Constant(0.22f), | |
| 3256 m.common()->Float32Constant(inf), m.common()->Float32Constant(0.22f), | |
| 3257 m.common()->Float32Constant(inf), m.common()->Float32Constant(-inf), | |
| 3258 NULL}; | |
| 3259 | |
| 3260 for (int i = 0; ops[i] != NULL; i++) { | |
| 3261 for (int j = 0; inputs[j] != NULL; j += 2) { | |
| 3262 RawMachineAssemblerTester<int32_t> m; | |
| 3263 Node* a = m.AddNode(inputs[j]); | |
| 3264 Node* b = m.AddNode(inputs[j + 1]); | |
| 3265 Node* binop = m.AddNode(ops[i], a, b); | |
| 3266 Node* base = m.PointerConstant(&result); | |
| 3267 Node* zero = m.IntPtrConstant(0); | |
| 3268 m.Store(kMachFloat32, base, zero, binop, kNoWriteBarrier); | |
| 3269 m.Return(m.Int32Constant(i + j)); | |
| 3270 CHECK_EQ(i + j, m.Call()); | |
| 3271 } | |
| 3272 } | 3246 } |
| 3273 } | 3247 } |
| 3274 | 3248 |
| 3275 | 3249 |
| 3276 TEST(RunFloat64Binop) { | 3250 TEST(RunFloat32Sub) { |
| 3277 RawMachineAssemblerTester<int32_t> m; | 3251 BufferedRawMachineAssemblerTester<float> m(kMachFloat32, kMachFloat32); |
| 3278 double result; | 3252 m.Return(m.Float32Sub(m.Parameter(0), m.Parameter(1))); |
| 3279 | 3253 |
| 3280 const Operator* ops[] = {m.machine()->Float64Add(), m.machine()->Float64Sub(), | 3254 FOR_FLOAT32_INPUTS(i) { |
| 3281 m.machine()->Float64Mul(), m.machine()->Float64Div(), | 3255 FOR_FLOAT32_INPUTS(j) { CheckFloatEq(*i - *j, m.Call(*i, *j)); } |
| 3282 m.machine()->Float64Mod(), NULL}; | |
| 3283 | |
| 3284 double inf = V8_INFINITY; | |
| 3285 const Operator* inputs[] = { | |
| 3286 m.common()->Float64Constant(0), m.common()->Float64Constant(1), | |
| 3287 m.common()->Float64Constant(1), m.common()->Float64Constant(0), | |
| 3288 m.common()->Float64Constant(0), m.common()->Float64Constant(-1), | |
| 3289 m.common()->Float64Constant(-1), m.common()->Float64Constant(0), | |
| 3290 m.common()->Float64Constant(0.22), m.common()->Float64Constant(-1.22), | |
| 3291 m.common()->Float64Constant(-1.22), m.common()->Float64Constant(0.22), | |
| 3292 m.common()->Float64Constant(inf), m.common()->Float64Constant(0.22), | |
| 3293 m.common()->Float64Constant(inf), m.common()->Float64Constant(-inf), | |
| 3294 NULL}; | |
| 3295 | |
| 3296 for (int i = 0; ops[i] != NULL; i++) { | |
| 3297 for (int j = 0; inputs[j] != NULL; j += 2) { | |
| 3298 RawMachineAssemblerTester<int32_t> m; | |
| 3299 Node* a = m.AddNode(inputs[j]); | |
| 3300 Node* b = m.AddNode(inputs[j + 1]); | |
| 3301 Node* binop = m.AddNode(ops[i], a, b); | |
| 3302 Node* base = m.PointerConstant(&result); | |
| 3303 Node* zero = m.Int32Constant(0); | |
| 3304 m.Store(kMachFloat64, base, zero, binop, kNoWriteBarrier); | |
| 3305 m.Return(m.Int32Constant(i + j)); | |
| 3306 CHECK_EQ(i + j, m.Call()); | |
| 3307 } | |
| 3308 } | 3256 } |
| 3309 } | 3257 } |
| 3310 | 3258 |
| 3259 |
| 3260 TEST(RunFloat32Mul) { |
| 3261 BufferedRawMachineAssemblerTester<float> m(kMachFloat32, kMachFloat32); |
| 3262 m.Return(m.Float32Mul(m.Parameter(0), m.Parameter(1))); |
| 3263 |
| 3264 FOR_FLOAT32_INPUTS(i) { |
| 3265 FOR_FLOAT32_INPUTS(j) { CheckFloatEq(*i * *j, m.Call(*i, *j)); } |
| 3266 } |
| 3267 } |
| 3268 |
| 3269 |
| 3270 TEST(RunFloat32Div) { |
| 3271 BufferedRawMachineAssemblerTester<float> m(kMachFloat32, kMachFloat32); |
| 3272 m.Return(m.Float32Div(m.Parameter(0), m.Parameter(1))); |
| 3273 |
| 3274 FOR_FLOAT32_INPUTS(i) { |
| 3275 FOR_FLOAT32_INPUTS(j) { CheckFloatEq(*i / *j, m.Call(*i, *j)); } |
| 3276 } |
| 3277 } |
| 3278 |
| 3279 |
| 3280 TEST(RunFloat64Add) { |
| 3281 BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64); |
| 3282 m.Return(m.Float64Add(m.Parameter(0), m.Parameter(1))); |
| 3283 |
| 3284 FOR_FLOAT64_INPUTS(i) { |
| 3285 FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(*i + *j, m.Call(*i, *j)); } |
| 3286 } |
| 3287 } |
| 3288 |
| 3289 |
| 3290 TEST(RunFloat64Sub) { |
| 3291 BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64); |
| 3292 m.Return(m.Float64Sub(m.Parameter(0), m.Parameter(1))); |
| 3293 |
| 3294 FOR_FLOAT64_INPUTS(i) { |
| 3295 FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(*i - *j, m.Call(*i, *j)); } |
| 3296 } |
| 3297 } |
| 3298 |
| 3299 |
| 3300 TEST(RunFloat64Mul) { |
| 3301 BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64); |
| 3302 m.Return(m.Float64Mul(m.Parameter(0), m.Parameter(1))); |
| 3303 |
| 3304 FOR_FLOAT64_INPUTS(i) { |
| 3305 FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(*i * *j, m.Call(*i, *j)); } |
| 3306 } |
| 3307 } |
| 3308 |
| 3309 |
| 3310 TEST(RunFloat64Div) { |
| 3311 BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64); |
| 3312 m.Return(m.Float64Div(m.Parameter(0), m.Parameter(1))); |
| 3313 |
| 3314 FOR_FLOAT64_INPUTS(i) { |
| 3315 FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(*i / *j, m.Call(*i, *j)); } |
| 3316 } |
| 3317 } |
| 3318 |
| 3319 |
| 3320 TEST(RunFloat64Mod) { |
| 3321 BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64); |
| 3322 m.Return(m.Float64Mod(m.Parameter(0), m.Parameter(1))); |
| 3323 |
| 3324 FOR_FLOAT64_INPUTS(i) { |
| 3325 FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(modulo(*i, *j), m.Call(*i, *j)); } |
| 3326 } |
| 3327 } |
| 3328 |
| 3311 | 3329 |
| 3312 TEST(RunDeadFloat32Binops) { | 3330 TEST(RunDeadFloat32Binops) { |
| 3313 RawMachineAssemblerTester<int32_t> m; | 3331 RawMachineAssemblerTester<int32_t> m; |
| 3314 | 3332 |
| 3315 const Operator* ops[] = {m.machine()->Float32Add(), m.machine()->Float32Sub(), | 3333 const Operator* ops[] = {m.machine()->Float32Add(), m.machine()->Float32Sub(), |
| 3316 m.machine()->Float32Mul(), m.machine()->Float32Div(), | 3334 m.machine()->Float32Mul(), m.machine()->Float32Div(), |
| 3317 NULL}; | 3335 NULL}; |
| 3318 | 3336 |
| 3319 for (int i = 0; ops[i] != NULL; i++) { | 3337 for (int i = 0; ops[i] != NULL; i++) { |
| 3320 RawMachineAssemblerTester<int32_t> m; | 3338 RawMachineAssemblerTester<int32_t> m; |
| (...skipping 61 matching lines...) |
| 3382 FOR_FLOAT32_INPUTS(pl) { | 3400 FOR_FLOAT32_INPUTS(pl) { |
| 3383 FOR_FLOAT32_INPUTS(pr) { | 3401 FOR_FLOAT32_INPUTS(pr) { |
| 3384 float expected = *pl - *pr; | 3402 float expected = *pl - *pr; |
| 3385 CheckFloatEq(expected, bt.call(*pl, *pr)); | 3403 CheckFloatEq(expected, bt.call(*pl, *pr)); |
| 3386 } | 3404 } |
| 3387 } | 3405 } |
| 3388 } | 3406 } |
| 3389 | 3407 |
| 3390 | 3408 |
| 3391 TEST(RunFloat32SubImm1) { | 3409 TEST(RunFloat32SubImm1) { |
| 3392 float input = 0.0f; | 3410 FOR_FLOAT32_INPUTS(i) { |
| 3393 float output = 0.0f; | 3411 BufferedRawMachineAssemblerTester<float> m(kMachFloat32); |
| 3412 m.Return(m.Float32Sub(m.Float32Constant(*i), m.Parameter(0))); |
| 3394 | 3413 |
| 3395 FOR_FLOAT32_INPUTS(i) { | 3414 FOR_FLOAT32_INPUTS(j) { CheckFloatEq(*i - *j, m.Call(*j)); } |
| 3396 RawMachineAssemblerTester<int32_t> m; | |
| 3397 Node* t0 = m.LoadFromPointer(&input, kMachFloat32); | |
| 3398 Node* t1 = m.Float32Sub(m.Float32Constant(*i), t0); | |
| 3399 m.StoreToPointer(&output, kMachFloat32, t1); | |
| 3400 m.Return(m.Int32Constant(0)); | |
| 3401 FOR_FLOAT32_INPUTS(j) { | |
| 3402 input = *j; | |
| 3403 float expected = *i - input; | |
| 3404 CHECK_EQ(0, m.Call()); | |
| 3405 CheckFloatEq(expected, output); | |
| 3406 } | |
| 3407 } | 3415 } |
| 3408 } | 3416 } |
| 3409 | 3417 |
| 3410 | 3418 |
| 3411 TEST(RunFloat32SubImm2) { | 3419 TEST(RunFloat32SubImm2) { |
| 3412 float input = 0.0f; | 3420 FOR_FLOAT32_INPUTS(i) { |
| 3413 float output = 0.0f; | 3421 BufferedRawMachineAssemblerTester<float> m(kMachFloat32); |
| 3422 m.Return(m.Float32Sub(m.Parameter(0), m.Float32Constant(*i))); |
| 3414 | 3423 |
| 3415 FOR_FLOAT32_INPUTS(i) { | 3424 FOR_FLOAT32_INPUTS(j) { CheckFloatEq(*j - *i, m.Call(*j)); } |
| 3416 RawMachineAssemblerTester<int32_t> m; | 3425 } |
| 3417 Node* t0 = m.LoadFromPointer(&input, kMachFloat32); | 3426 } |
| 3418 Node* t1 = m.Float32Sub(t0, m.Float32Constant(*i)); | 3427 |
| 3419 m.StoreToPointer(&output, kMachFloat32, t1); | 3428 |
| 3420 m.Return(m.Int32Constant(0)); | 3429 TEST(RunFloat64SubImm1) { |
| 3421 FOR_FLOAT32_INPUTS(j) { | 3430 FOR_FLOAT64_INPUTS(i) { |
| 3422 input = *j; | 3431 BufferedRawMachineAssemblerTester<double> m(kMachFloat64); |
| 3423 float expected = input - *i; | 3432 m.Return(m.Float64Sub(m.Float64Constant(*i), m.Parameter(0))); |
| 3424 CHECK_EQ(0, m.Call()); | 3433 |
| 3425 CheckFloatEq(expected, output); | 3434 FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(*i - *j, m.Call(*j)); } |
| 3426 } | 3435 } |
| 3436 } |
| 3437 |
| 3438 |
| 3439 TEST(RunFloat64SubImm2) { |
| 3440 FOR_FLOAT64_INPUTS(i) { |
| 3441 BufferedRawMachineAssemblerTester<double> m(kMachFloat64); |
| 3442 m.Return(m.Float64Sub(m.Parameter(0), m.Float64Constant(*i))); |
| 3443 |
| 3444 FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(*j - *i, m.Call(*j)); } |
| 3427 } | 3445 } |
| 3428 } | 3446 } |
| 3429 | 3447 |
| 3430 | 3448 |
| 3431 TEST(RunFloat64SubP) { | 3449 TEST(RunFloat64SubP) { |
| 3432 RawMachineAssemblerTester<int32_t> m; | 3450 RawMachineAssemblerTester<int32_t> m; |
| 3433 Float64BinopTester bt(&m); | 3451 Float64BinopTester bt(&m); |
| 3434 | 3452 |
| 3435 bt.AddReturn(m.Float64Sub(bt.param0, bt.param1)); | 3453 bt.AddReturn(m.Float64Sub(bt.param0, bt.param1)); |
| 3436 | 3454 |
| 3437 FOR_FLOAT64_INPUTS(pl) { | 3455 FOR_FLOAT64_INPUTS(pl) { |
| 3438 FOR_FLOAT64_INPUTS(pr) { | 3456 FOR_FLOAT64_INPUTS(pr) { |
| 3439 double expected = *pl - *pr; | 3457 double expected = *pl - *pr; |
| 3440 CheckDoubleEq(expected, bt.call(*pl, *pr)); | 3458 CheckDoubleEq(expected, bt.call(*pl, *pr)); |
| 3441 } | 3459 } |
| 3442 } | 3460 } |
| 3443 } | 3461 } |
| 3444 | |
| 3445 | |
| 3446 TEST(RunFloat64SubImm1) { | |
| 3447 double input = 0.0; | |
| 3448 double output = 0.0; | |
| 3449 | |
| 3450 FOR_FLOAT64_INPUTS(i) { | |
| 3451 RawMachineAssemblerTester<int32_t> m; | |
| 3452 Node* t0 = m.LoadFromPointer(&input, kMachFloat64); | |
| 3453 Node* t1 = m.Float64Sub(m.Float64Constant(*i), t0); | |
| 3454 m.StoreToPointer(&output, kMachFloat64, t1); | |
| 3455 m.Return(m.Int32Constant(0)); | |
| 3456 FOR_FLOAT64_INPUTS(j) { | |
| 3457 input = *j; | |
| 3458 double expected = *i - input; | |
| 3459 CHECK_EQ(0, m.Call()); | |
| 3460 CheckDoubleEq(expected, output); | |
| 3461 } | |
| 3462 } | |
| 3463 } | |
| 3464 | |
| 3465 | |
| 3466 TEST(RunFloat64SubImm2) { | |
| 3467 double input = 0.0; | |
| 3468 double output = 0.0; | |
| 3469 | |
| 3470 FOR_FLOAT64_INPUTS(i) { | |
| 3471 RawMachineAssemblerTester<int32_t> m; | |
| 3472 Node* t0 = m.LoadFromPointer(&input, kMachFloat64); | |
| 3473 Node* t1 = m.Float64Sub(t0, m.Float64Constant(*i)); | |
| 3474 m.StoreToPointer(&output, kMachFloat64, t1); | |
| 3475 m.Return(m.Int32Constant(0)); | |
| 3476 FOR_FLOAT64_INPUTS(j) { | |
| 3477 input = *j; | |
| 3478 double expected = input - *i; | |
| 3479 CHECK_EQ(0, m.Call()); | |
| 3480 CheckDoubleEq(expected, output); | |
| 3481 } | |
| 3482 } | |
| 3483 } | |
| 3484 | 3462 |
| 3485 | 3463 |
| 3486 TEST(RunFloat32MulP) { | 3464 TEST(RunFloat32MulP) { |
| 3487 RawMachineAssemblerTester<int32_t> m; | 3465 RawMachineAssemblerTester<int32_t> m; |
| 3488 Float32BinopTester bt(&m); | 3466 Float32BinopTester bt(&m); |
| 3489 | 3467 |
| 3490 bt.AddReturn(m.Float32Mul(bt.param0, bt.param1)); | 3468 bt.AddReturn(m.Float32Mul(bt.param0, bt.param1)); |
| 3491 | 3469 |
| 3492 FOR_FLOAT32_INPUTS(pl) { | 3470 FOR_FLOAT32_INPUTS(pl) { |
| 3493 FOR_FLOAT32_INPUTS(pr) { | 3471 FOR_FLOAT32_INPUTS(pr) { |
| (...skipping 12 matching lines...) |
| 3506 | 3484 |
| 3507 FOR_FLOAT64_INPUTS(pl) { | 3485 FOR_FLOAT64_INPUTS(pl) { |
| 3508 FOR_FLOAT64_INPUTS(pr) { | 3486 FOR_FLOAT64_INPUTS(pr) { |
| 3509 double expected = *pl * *pr; | 3487 double expected = *pl * *pr; |
| 3510 CheckDoubleEq(expected, bt.call(*pl, *pr)); | 3488 CheckDoubleEq(expected, bt.call(*pl, *pr)); |
| 3511 } | 3489 } |
| 3512 } | 3490 } |
| 3513 } | 3491 } |
| 3514 | 3492 |
| 3515 | 3493 |
| 3516 TEST(RunFloat64MulAndFloat64AddP) { | 3494 TEST(RunFloat64MulAndFloat64Add1) { |
| 3517 double input_a = 0.0; | 3495 BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64, |
| 3518 double input_b = 0.0; | 3496 kMachFloat64); |
| 3519 double input_c = 0.0; | 3497 m.Return(m.Float64Add(m.Float64Mul(m.Parameter(0), m.Parameter(1)), |
| 3520 double output = 0.0; | 3498 m.Parameter(2))); |
| 3521 | 3499 |
| 3522 { | 3500 FOR_FLOAT64_INPUTS(i) { |
| 3523 RawMachineAssemblerTester<int32_t> m; | 3501 FOR_FLOAT64_INPUTS(j) { |
| 3524 Node* a = m.LoadFromPointer(&input_a, kMachFloat64); | 3502 FOR_FLOAT64_INPUTS(k) { |
| 3525 Node* b = m.LoadFromPointer(&input_b, kMachFloat64); | 3503 CheckDoubleEq((*i * *j) + *k, m.Call(*i, *j, *k)); |
| 3526 Node* c = m.LoadFromPointer(&input_c, kMachFloat64); | |
| 3527 m.StoreToPointer(&output, kMachFloat64, | |
| 3528 m.Float64Add(m.Float64Mul(a, b), c)); | |
| 3529 m.Return(m.Int32Constant(0)); | |
| 3530 FOR_FLOAT64_INPUTS(i) { | |
| 3531 FOR_FLOAT64_INPUTS(j) { | |
| 3532 FOR_FLOAT64_INPUTS(k) { | |
| 3533 input_a = *i; | |
| 3534 input_b = *j; | |
| 3535 input_c = *k; | |
| 3536 volatile double temp = input_a * input_b; | |
| 3537 volatile double expected = temp + input_c; | |
| 3538 CHECK_EQ(0, m.Call()); | |
| 3539 CheckDoubleEq(expected, output); | |
| 3540 } | |
| 3541 } | |
| 3542 } | |
| 3543 } | |
| 3544 { | |
| 3545 RawMachineAssemblerTester<int32_t> m; | |
| 3546 Node* a = m.LoadFromPointer(&input_a, kMachFloat64); | |
| 3547 Node* b = m.LoadFromPointer(&input_b, kMachFloat64); | |
| 3548 Node* c = m.LoadFromPointer(&input_c, kMachFloat64); | |
| 3549 m.StoreToPointer(&output, kMachFloat64, | |
| 3550 m.Float64Add(a, m.Float64Mul(b, c))); | |
| 3551 m.Return(m.Int32Constant(0)); | |
| 3552 FOR_FLOAT64_INPUTS(i) { | |
| 3553 FOR_FLOAT64_INPUTS(j) { | |
| 3554 FOR_FLOAT64_INPUTS(k) { | |
| 3555 input_a = *i; | |
| 3556 input_b = *j; | |
| 3557 input_c = *k; | |
| 3558 volatile double temp = input_b * input_c; | |
| 3559 volatile double expected = input_a + temp; | |
| 3560 CHECK_EQ(0, m.Call()); | |
| 3561 CheckDoubleEq(expected, output); | |
| 3562 } | |
| 3563 } | 3504 } |
| 3564 } | 3505 } |
| 3565 } | 3506 } |
| 3566 } | 3507 } |
| 3567 | 3508 |
| 3568 | 3509 |
| 3569 TEST(RunFloat64MulAndFloat64SubP) { | 3510 TEST(RunFloat64MulAndFloat64Add2) { |
| 3570 double input_a = 0.0; | 3511 BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64, |
| 3571 double input_b = 0.0; | 3512 kMachFloat64); |
| 3572 double input_c = 0.0; | 3513 m.Return(m.Float64Add(m.Parameter(0), |
| 3573 double output = 0.0; | 3514 m.Float64Mul(m.Parameter(1), m.Parameter(2)))); |
| 3574 | |
| 3575 RawMachineAssemblerTester<int32_t> m; | |
| 3576 Node* a = m.LoadFromPointer(&input_a, kMachFloat64); | |
| 3577 Node* b = m.LoadFromPointer(&input_b, kMachFloat64); | |
| 3578 Node* c = m.LoadFromPointer(&input_c, kMachFloat64); | |
| 3579 m.StoreToPointer(&output, kMachFloat64, m.Float64Sub(a, m.Float64Mul(b, c))); | |
| 3580 m.Return(m.Int32Constant(0)); | |
| 3581 | 3515 |
| 3582 FOR_FLOAT64_INPUTS(i) { | 3516 FOR_FLOAT64_INPUTS(i) { |
| 3583 FOR_FLOAT64_INPUTS(j) { | 3517 FOR_FLOAT64_INPUTS(j) { |
| 3584 FOR_FLOAT64_INPUTS(k) { | 3518 FOR_FLOAT64_INPUTS(k) { |
| 3585 input_a = *i; | 3519 CheckDoubleEq(*i + (*j * *k), m.Call(*i, *j, *k)); |
| 3586 input_b = *j; | |
| 3587 input_c = *k; | |
| 3588 volatile double temp = input_b * input_c; | |
| 3589 volatile double expected = input_a - temp; | |
| 3590 CHECK_EQ(0, m.Call()); | |
| 3591 CheckDoubleEq(expected, output); | |
| 3592 } | 3520 } |
| 3593 } | 3521 } |
| 3594 } | 3522 } |
| 3595 } | 3523 } |
| 3596 | 3524 |
| 3597 | 3525 |
| 3598 TEST(RunFloat64MulImm) { | 3526 TEST(RunFloat64MulAndFloat64Sub1) { |
| 3599 double input = 0.0; | 3527 BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64, |
| 3600 double output = 0.0; | 3528 kMachFloat64); |
| 3529 m.Return(m.Float64Sub(m.Float64Mul(m.Parameter(0), m.Parameter(1)), |
| 3530 m.Parameter(2))); |
| 3601 | 3531 |
| 3602 { | 3532 FOR_FLOAT64_INPUTS(i) { |
| 3603 FOR_FLOAT64_INPUTS(i) { | 3533 FOR_FLOAT64_INPUTS(j) { |
| 3604 RawMachineAssemblerTester<int32_t> m; | 3534 FOR_FLOAT64_INPUTS(k) { |
| 3605 Node* t0 = m.LoadFromPointer(&input, kMachFloat64); | 3535 CheckDoubleEq((*i * *j) - *k, m.Call(*i, *j, *k)); |
| 3606 Node* t1 = m.Float64Mul(m.Float64Constant(*i), t0); | |
| 3607 m.StoreToPointer(&output, kMachFloat64, t1); | |
| 3608 m.Return(m.Int32Constant(0)); | |
| 3609 FOR_FLOAT64_INPUTS(j) { | |
| 3610 input = *j; | |
| 3611 double expected = *i * input; | |
| 3612 CHECK_EQ(0, m.Call()); | |
| 3613 CheckDoubleEq(expected, output); | |
| 3614 } | |
| 3615 } | |
| 3616 } | |
| 3617 { | |
| 3618 FOR_FLOAT64_INPUTS(i) { | |
| 3619 RawMachineAssemblerTester<int32_t> m; | |
| 3620 Node* t0 = m.LoadFromPointer(&input, kMachFloat64); | |
| 3621 Node* t1 = m.Float64Mul(t0, m.Float64Constant(*i)); | |
| 3622 m.StoreToPointer(&output, kMachFloat64, t1); | |
| 3623 m.Return(m.Int32Constant(0)); | |
| 3624 FOR_FLOAT64_INPUTS(j) { | |
| 3625 input = *j; | |
| 3626 double expected = input * *i; | |
| 3627 CHECK_EQ(0, m.Call()); | |
| 3628 CheckDoubleEq(expected, output); | |
| 3629 } | 3536 } |
| 3630 } | 3537 } |
| 3631 } | 3538 } |
| 3632 } | 3539 } |
| 3633 | 3540 |
| 3541 |
| 3542 TEST(RunFloat64MulAndFloat64Sub2) { |
| 3543 BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64, |
| 3544 kMachFloat64); |
| 3545 m.Return(m.Float64Sub(m.Parameter(0), |
| 3546 m.Float64Mul(m.Parameter(1), m.Parameter(2)))); |
| 3547 |
| 3548 FOR_FLOAT64_INPUTS(i) { |
| 3549 FOR_FLOAT64_INPUTS(j) { |
| 3550 FOR_FLOAT64_INPUTS(k) { |
| 3551 CheckDoubleEq(*i - (*j * *k), m.Call(*i, *j, *k)); |
| 3552 } |
| 3553 } |
| 3554 } |
| 3555 } |
| 3556 |
| 3557 |
| 3558 TEST(RunFloat64MulImm1) { |
| 3559 FOR_FLOAT64_INPUTS(i) { |
| 3560 BufferedRawMachineAssemblerTester<double> m(kMachFloat64); |
| 3561 m.Return(m.Float64Mul(m.Float64Constant(*i), m.Parameter(0))); |
| 3562 |
| 3563 FOR_FLOAT64_INPUTS(j) { CheckFloatEq(*i * *j, m.Call(*j)); } |
| 3564 } |
| 3565 } |
| 3566 |
| 3567 |
| 3568 TEST(RunFloat64MulImm2) { |
| 3569 FOR_FLOAT64_INPUTS(i) { |
| 3570 BufferedRawMachineAssemblerTester<double> m(kMachFloat64); |
| 3571 m.Return(m.Float64Mul(m.Parameter(0), m.Float64Constant(*i))); |
| 3572 |
| 3573 FOR_FLOAT64_INPUTS(j) { CheckFloatEq(*j * *i, m.Call(*j)); } |
| 3574 } |
| 3575 } |
| 3576 |
| 3634 | 3577 |
| 3635 TEST(RunFloat32DivP) { | 3578 TEST(RunFloat32DivP) { |
| 3636 RawMachineAssemblerTester<int32_t> m; | 3579 RawMachineAssemblerTester<int32_t> m; |
| 3637 Float32BinopTester bt(&m); | 3580 Float32BinopTester bt(&m); |
| 3638 | 3581 |
| 3639 bt.AddReturn(m.Float32Div(bt.param0, bt.param1)); | 3582 bt.AddReturn(m.Float32Div(bt.param0, bt.param1)); |
| 3640 | 3583 |
| 3641 FOR_FLOAT32_INPUTS(pl) { | 3584 FOR_FLOAT32_INPUTS(pl) { |
| 3642 FOR_FLOAT32_INPUTS(pr) { | 3585 FOR_FLOAT32_INPUTS(pr) { |
| 3643 float expected = *pl / *pr; | 3586 float expected = *pl / *pr; |
| (...skipping 28 matching lines...) |
| 3672 FOR_FLOAT64_INPUTS(j) { | 3615 FOR_FLOAT64_INPUTS(j) { |
| 3673 double expected = modulo(*i, *j); | 3616 double expected = modulo(*i, *j); |
| 3674 double found = bt.call(*i, *j); | 3617 double found = bt.call(*i, *j); |
| 3675 CheckDoubleEq(expected, found); | 3618 CheckDoubleEq(expected, found); |
| 3676 } | 3619 } |
| 3677 } | 3620 } |
| 3678 } | 3621 } |
| 3679 | 3622 |
| 3680 | 3623 |
| 3681 TEST(RunChangeInt32ToFloat64_A) { | 3624 TEST(RunChangeInt32ToFloat64_A) { |
| 3682 RawMachineAssemblerTester<int32_t> m; | |
| 3683 int32_t magic = 0x986234; | 3625 int32_t magic = 0x986234; |
| 3684 double result = 0; | 3626 BufferedRawMachineAssemblerTester<double> m; |
| 3685 | 3627 m.Return(m.ChangeInt32ToFloat64(m.Int32Constant(magic))); |
| 3686 Node* convert = m.ChangeInt32ToFloat64(m.Int32Constant(magic)); | 3628 CheckDoubleEq(static_cast<double>(magic), m.Call()); |
| 3687 m.Store(kMachFloat64, m.PointerConstant(&result), m.Int32Constant(0), convert, | |
| 3688 kNoWriteBarrier); | |
| 3689 m.Return(m.Int32Constant(magic)); | |
| 3690 | |
| 3691 CHECK_EQ(magic, m.Call()); | |
| 3692 CHECK_EQ(static_cast<double>(magic), result); | |
| 3693 } | 3629 } |
| 3694 | 3630 |
| 3695 | 3631 |
| 3696 TEST(RunChangeInt32ToFloat64_B) { | 3632 TEST(RunChangeInt32ToFloat64_B) { |
| 3697 RawMachineAssemblerTester<int32_t> m(kMachInt32); | 3633 BufferedRawMachineAssemblerTester<double> m(kMachInt32); |
| 3698 double output = 0; | 3634 m.Return(m.ChangeInt32ToFloat64(m.Parameter(0))); |
| 3699 | 3635 |
| 3700 Node* convert = m.ChangeInt32ToFloat64(m.Parameter(0)); | 3636 FOR_INT32_INPUTS(i) { CheckDoubleEq(static_cast<double>(*i), m.Call(*i)); } |
| 3701 m.Store(kMachFloat64, m.PointerConstant(&output), m.Int32Constant(0), convert, | |
| 3702 kNoWriteBarrier); | |
| 3703 m.Return(m.Parameter(0)); | |
| 3704 | |
| 3705 FOR_INT32_INPUTS(i) { | |
| 3706 int32_t expect = *i; | |
| 3707 CHECK_EQ(expect, m.Call(expect)); | |
| 3708 CHECK_EQ(static_cast<double>(expect), output); | |
| 3709 } | |
| 3710 } | 3637 } |
| 3711 | 3638 |
| 3712 | 3639 |
| 3713 TEST(RunChangeUint32ToFloat64_B) { | 3640 TEST(RunChangeUint32ToFloat64_B) { |
| 3714 RawMachineAssemblerTester<uint32_t> m(kMachUint32); | 3641 RawMachineAssemblerTester<uint32_t> m(kMachUint32); |
| 3715 double output = 0; | 3642 double output = 0; |
| 3716 | 3643 |
| 3717 Node* convert = m.ChangeUint32ToFloat64(m.Parameter(0)); | 3644 Node* convert = m.ChangeUint32ToFloat64(m.Parameter(0)); |
| 3718 m.Store(kMachFloat64, m.PointerConstant(&output), m.Int32Constant(0), convert, | 3645 m.Store(kMachFloat64, m.PointerConstant(&output), m.Int32Constant(0), convert, |
| 3719 kNoWriteBarrier); | 3646 kNoWriteBarrier); |
| (...skipping 33 matching lines...) |
| 3753 | 3680 |
| 3754 CHECK_EQ(magic, m.Call()); | 3681 CHECK_EQ(magic, m.Call()); |
| 3755 | 3682 |
| 3756 for (int i = 0; i < kNumInputs; i++) { | 3683 for (int i = 0; i < kNumInputs; i++) { |
| 3757 CHECK_EQ(result[i], static_cast<double>(100 + i)); | 3684 CHECK_EQ(result[i], static_cast<double>(100 + i)); |
| 3758 } | 3685 } |
| 3759 } | 3686 } |
| 3760 | 3687 |
| 3761 | 3688 |
| 3762 TEST(RunChangeFloat64ToInt32_A) { | 3689 TEST(RunChangeFloat64ToInt32_A) { |
| 3763 RawMachineAssemblerTester<int32_t> m; | 3690 BufferedRawMachineAssemblerTester<int32_t> m; |
| 3764 int32_t magic = 0x786234; | 3691 double magic = 11.1; |
| 3765 double input = 11.1; | 3692 m.Return(m.ChangeFloat64ToInt32(m.Float64Constant(magic))); |
| 3766 int32_t result = 0; | 3693 CHECK_EQ(static_cast<int32_t>(magic), m.Call()); |
| 3767 | |
| 3768 m.Store(kMachInt32, m.PointerConstant(&result), m.Int32Constant(0), | |
| 3769 m.ChangeFloat64ToInt32(m.Float64Constant(input)), kNoWriteBarrier); | |
| 3770 m.Return(m.Int32Constant(magic)); | |
| 3771 | |
| 3772 CHECK_EQ(magic, m.Call()); | |
| 3773 CHECK_EQ(static_cast<int32_t>(input), result); | |
| 3774 } | 3694 } |
| 3775 | 3695 |
| 3776 | 3696 |
| 3777 TEST(RunChangeFloat64ToInt32_B) { | 3697 TEST(RunChangeFloat64ToInt32_B) { |
| 3778 RawMachineAssemblerTester<int32_t> m; | 3698 BufferedRawMachineAssemblerTester<int32_t> m(kMachFloat64); |
| 3779 double input = 0; | 3699 m.Return(m.ChangeFloat64ToInt32(m.Parameter(0))); |
| 3780 int32_t output = 0; | |
| 3781 | 3700 |
| 3782 Node* load = | 3701 // Note we don't check fractional inputs, or inputs outside the range of |
| 3783 m.Load(kMachFloat64, m.PointerConstant(&input), m.Int32Constant(0)); | 3702 // int32, because these Convert operators really should be Change operators. |
| 3784 Node* convert = m.ChangeFloat64ToInt32(load); | 3703 FOR_INT32_INPUTS(i) { CHECK_EQ(*i, m.Call(static_cast<double>(*i))); } |
| 3785 m.Store(kMachInt32, m.PointerConstant(&output), m.Int32Constant(0), convert, | |
| 3786 kNoWriteBarrier); | |
| 3787 m.Return(convert); | |
| 3788 | 3704 |
| 3789 { | 3705 for (int32_t n = 1; n < 31; ++n) { |
| 3790 FOR_INT32_INPUTS(i) { | 3706 CHECK_EQ(1 << n, m.Call(static_cast<double>(1 << n))); |
| 3791 input = *i; | |
| 3792 int32_t expect = *i; | |
| 3793 CHECK_EQ(expect, m.Call()); | |
| 3794 CHECK_EQ(expect, output); | |
| 3795 } | |
| 3796 } | 3707 } |
| 3797 | 3708 |
| 3798 // Check various powers of 2. | |
| 3799 for (int32_t n = 1; n < 31; ++n) { | 3709 for (int32_t n = 1; n < 31; ++n) { |
| 3800 { | 3710 CHECK_EQ(3 << n, m.Call(static_cast<double>(3 << n))); |
| 3801 input = 1 << n; | |
| 3802 int32_t expect = static_cast<int32_t>(input); | |
| 3803 CHECK_EQ(expect, m.Call()); | |
| 3804 CHECK_EQ(expect, output); | |
| 3805 } | |
| 3806 | |
| 3807 { | |
| 3808 input = 3 << n; | |
| 3809 int32_t expect = static_cast<int32_t>(input); | |
| 3810 CHECK_EQ(expect, m.Call()); | |
| 3811 CHECK_EQ(expect, output); | |
| 3812 } | |
| 3813 } | 3711 } |
| 3814 // Note we don't check fractional inputs, because these Convert operators | |
| 3815 // really should be Change operators. | |
| 3816 } | 3712 } |
| 3817 | 3713 |
| 3818 | 3714 |
| 3819 TEST(RunChangeFloat64ToUint32_B) { | 3715 TEST(RunChangeFloat64ToUint32_B) { |
| 3820 RawMachineAssemblerTester<int32_t> m; | 3716 RawMachineAssemblerTester<int32_t> m; |
| 3821 double input = 0; | 3717 double input = 0; |
| 3822 int32_t output = 0; | 3718 int32_t output = 0; |
| 3823 | 3719 |
| 3824 Node* load = | 3720 Node* load = |
| 3825 m.Load(kMachFloat64, m.PointerConstant(&input), m.Int32Constant(0)); | 3721 m.Load(kMachFloat64, m.PointerConstant(&input), m.Int32Constant(0)); |
| (...skipping 1740 matching lines...) |
| 5566 Node* call = r.AddNode(r.common()->Call(desc), phi); | 5462 Node* call = r.AddNode(r.common()->Call(desc), phi); |
| 5567 r.Return(call); | 5463 r.Return(call); |
| 5568 | 5464 |
| 5569 CHECK_EQ(33, r.Call(1)); | 5465 CHECK_EQ(33, r.Call(1)); |
| 5570 CHECK_EQ(44, r.Call(0)); | 5466 CHECK_EQ(44, r.Call(0)); |
| 5571 } | 5467 } |
| 5572 | 5468 |
| 5573 } // namespace compiler | 5469 } // namespace compiler |
| 5574 } // namespace internal | 5470 } // namespace internal |
| 5575 } // namespace v8 | 5471 } // namespace v8 |