OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. Use of this | 1 // Copyright 2014 the V8 project authors. All rights reserved. Use of this |
2 // source code is governed by a BSD-style license that can be found in the | 2 // source code is governed by a BSD-style license that can be found in the |
3 // LICENSE file. | 3 // LICENSE file. |
4 | 4 |
5 // TODO(jochen): Remove this after the setting is turned on globally. | 5 // TODO(jochen): Remove this after the setting is turned on globally. |
6 #define V8_IMMINENT_DEPRECATION_WARNINGS | 6 #define V8_IMMINENT_DEPRECATION_WARNINGS |
7 | 7 |
8 #include <cmath> | 8 #include <cmath> |
9 #include <functional> | 9 #include <functional> |
10 #include <limits> | 10 #include <limits> |
(...skipping 3224 matching lines...) |
3235 RunLoadStore<float>(kMachFloat32); | 3235 RunLoadStore<float>(kMachFloat32); |
3236 RunLoadStore<double>(kMachFloat64); | 3236 RunLoadStore<double>(kMachFloat64); |
3237 } | 3237 } |
3238 | 3238 |
3239 | 3239 |
3240 TEST(RunFloat32Add) { | 3240 TEST(RunFloat32Add) { |
3241 BufferedRawMachineAssemblerTester<float> m(kMachFloat32, kMachFloat32); | 3241 BufferedRawMachineAssemblerTester<float> m(kMachFloat32, kMachFloat32); |
3242 m.Return(m.Float32Add(m.Parameter(0), m.Parameter(1))); | 3242 m.Return(m.Float32Add(m.Parameter(0), m.Parameter(1))); |
3243 | 3243 |
3244 FOR_FLOAT32_INPUTS(i) { | 3244 FOR_FLOAT32_INPUTS(i) { |
3245 FOR_FLOAT32_INPUTS(j) { CheckFloatEq(*i + *j, m.Call(*i, *j)); } | 3245 FOR_FLOAT32_INPUTS(j) { |
| 3246 volatile float expected = *i + *j; |
| 3247 CheckFloatEq(expected, m.Call(*i, *j)); |
| 3248 } |
3246 } | 3249 } |
3247 } | 3250 } |
3248 | 3251 |
3249 | 3252 |
3250 TEST(RunFloat32Sub) { | 3253 TEST(RunFloat32Sub) { |
3251 BufferedRawMachineAssemblerTester<float> m(kMachFloat32, kMachFloat32); | 3254 BufferedRawMachineAssemblerTester<float> m(kMachFloat32, kMachFloat32); |
3252 m.Return(m.Float32Sub(m.Parameter(0), m.Parameter(1))); | 3255 m.Return(m.Float32Sub(m.Parameter(0), m.Parameter(1))); |
3253 | 3256 |
3254 FOR_FLOAT32_INPUTS(i) { | 3257 FOR_FLOAT32_INPUTS(i) { |
3255 FOR_FLOAT32_INPUTS(j) { CheckFloatEq(*i - *j, m.Call(*i, *j)); } | 3258 FOR_FLOAT32_INPUTS(j) { |
| 3259 volatile float expected = *i - *j; |
| 3260 CheckFloatEq(expected, m.Call(*i, *j)); |
| 3261 } |
3256 } | 3262 } |
3257 } | 3263 } |
3258 | 3264 |
3259 | 3265 |
3260 TEST(RunFloat32Mul) { | 3266 TEST(RunFloat32Mul) { |
3261 BufferedRawMachineAssemblerTester<float> m(kMachFloat32, kMachFloat32); | 3267 BufferedRawMachineAssemblerTester<float> m(kMachFloat32, kMachFloat32); |
3262 m.Return(m.Float32Mul(m.Parameter(0), m.Parameter(1))); | 3268 m.Return(m.Float32Mul(m.Parameter(0), m.Parameter(1))); |
3263 | 3269 |
3264 FOR_FLOAT32_INPUTS(i) { | 3270 FOR_FLOAT32_INPUTS(i) { |
3265 FOR_FLOAT32_INPUTS(j) { CheckFloatEq(*i * *j, m.Call(*i, *j)); } | 3271 FOR_FLOAT32_INPUTS(j) { |
| 3272 volatile float expected = *i * *j; |
| 3273 CheckFloatEq(expected, m.Call(*i, *j)); |
| 3274 } |
3266 } | 3275 } |
3267 } | 3276 } |
3268 | 3277 |
3269 | 3278 |
3270 TEST(RunFloat32Div) { | 3279 TEST(RunFloat32Div) { |
3271 BufferedRawMachineAssemblerTester<float> m(kMachFloat32, kMachFloat32); | 3280 BufferedRawMachineAssemblerTester<float> m(kMachFloat32, kMachFloat32); |
3272 m.Return(m.Float32Div(m.Parameter(0), m.Parameter(1))); | 3281 m.Return(m.Float32Div(m.Parameter(0), m.Parameter(1))); |
3273 | 3282 |
3274 FOR_FLOAT32_INPUTS(i) { | 3283 FOR_FLOAT32_INPUTS(i) { |
3275 FOR_FLOAT32_INPUTS(j) { CheckFloatEq(*i / *j, m.Call(*i, *j)); } | 3284 FOR_FLOAT32_INPUTS(j) { |
| 3285 volatile float expected = *i / *j; |
| 3286 CheckFloatEq(expected, m.Call(*i, *j)); |
| 3287 } |
3276 } | 3288 } |
3277 } | 3289 } |
3278 | 3290 |
3279 | 3291 |
3280 TEST(RunFloat64Add) { | 3292 TEST(RunFloat64Add) { |
3281 BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64); | 3293 BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64); |
3282 m.Return(m.Float64Add(m.Parameter(0), m.Parameter(1))); | 3294 m.Return(m.Float64Add(m.Parameter(0), m.Parameter(1))); |
3283 | 3295 |
3284 FOR_FLOAT64_INPUTS(i) { | 3296 FOR_FLOAT64_INPUTS(i) { |
3285 FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(*i + *j, m.Call(*i, *j)); } | 3297 FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(*i + *j, m.Call(*i, *j)); } |
3286 } | 3298 } |
3287 } | 3299 } |
3288 | 3300 |
3289 | 3301 |
3290 TEST(RunFloat64Sub) { | 3302 TEST(RunFloat64Sub) { |
3291 BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64); | 3303 BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64); |
3292 m.Return(m.Float64Sub(m.Parameter(0), m.Parameter(1))); | 3304 m.Return(m.Float64Sub(m.Parameter(0), m.Parameter(1))); |
3293 | 3305 |
3294 FOR_FLOAT64_INPUTS(i) { | 3306 FOR_FLOAT64_INPUTS(i) { |
3295 FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(*i - *j, m.Call(*i, *j)); } | 3307 FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(*i - *j, m.Call(*i, *j)); } |
3296 } | 3308 } |
3297 } | 3309 } |
3298 | 3310 |
3299 | 3311 |
3300 TEST(RunFloat64Mul) { | 3312 TEST(RunFloat64Mul) { |
3301 BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64); | 3313 BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64); |
3302 m.Return(m.Float64Mul(m.Parameter(0), m.Parameter(1))); | 3314 m.Return(m.Float64Mul(m.Parameter(0), m.Parameter(1))); |
3303 | 3315 |
3304 FOR_FLOAT64_INPUTS(i) { | 3316 FOR_FLOAT64_INPUTS(i) { |
3305 FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(*i * *j, m.Call(*i, *j)); } | 3317 FOR_FLOAT64_INPUTS(j) { |
| 3318 volatile double expected = *i * *j; |
| 3319 CheckDoubleEq(expected, m.Call(*i, *j)); |
| 3320 } |
3306 } | 3321 } |
3307 } | 3322 } |
3308 | 3323 |
3309 | 3324 |
3310 TEST(RunFloat64Div) { | 3325 TEST(RunFloat64Div) { |
3311 BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64); | 3326 BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64); |
3312 m.Return(m.Float64Div(m.Parameter(0), m.Parameter(1))); | 3327 m.Return(m.Float64Div(m.Parameter(0), m.Parameter(1))); |
3313 | 3328 |
3314 FOR_FLOAT64_INPUTS(i) { | 3329 FOR_FLOAT64_INPUTS(i) { |
3315 FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(*i / *j, m.Call(*i, *j)); } | 3330 FOR_FLOAT64_INPUTS(j) { |
| 3331 volatile double expected = *i / *j; |
| 3332 CheckDoubleEq(expected, m.Call(*i, *j)); |
| 3333 } |
3316 } | 3334 } |
3317 } | 3335 } |
3318 | 3336 |
3319 | 3337 |
3320 TEST(RunFloat64Mod) { | 3338 TEST(RunFloat64Mod) { |
3321 BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64); | 3339 BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64); |
3322 m.Return(m.Float64Mod(m.Parameter(0), m.Parameter(1))); | 3340 m.Return(m.Float64Mod(m.Parameter(0), m.Parameter(1))); |
3323 | 3341 |
3324 FOR_FLOAT64_INPUTS(i) { | 3342 FOR_FLOAT64_INPUTS(i) { |
3325 FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(modulo(*i, *j), m.Call(*i, *j)); } | 3343 FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(modulo(*i, *j), m.Call(*i, *j)); } |
(...skipping 78 matching lines...) |
3404 } | 3422 } |
3405 } | 3423 } |
3406 } | 3424 } |
3407 | 3425 |
3408 | 3426 |
3409 TEST(RunFloat32SubImm1) { | 3427 TEST(RunFloat32SubImm1) { |
3410 FOR_FLOAT32_INPUTS(i) { | 3428 FOR_FLOAT32_INPUTS(i) { |
3411 BufferedRawMachineAssemblerTester<float> m(kMachFloat32); | 3429 BufferedRawMachineAssemblerTester<float> m(kMachFloat32); |
3412 m.Return(m.Float32Sub(m.Float32Constant(*i), m.Parameter(0))); | 3430 m.Return(m.Float32Sub(m.Float32Constant(*i), m.Parameter(0))); |
3413 | 3431 |
3414 FOR_FLOAT32_INPUTS(j) { CheckFloatEq(*i - *j, m.Call(*j)); } | 3432 FOR_FLOAT32_INPUTS(j) { |
| 3433 volatile float expected = *i - *j; |
| 3434 CheckFloatEq(expected, m.Call(*j)); |
| 3435 } |
3415 } | 3436 } |
3416 } | 3437 } |
3417 | 3438 |
3418 | 3439 |
3419 TEST(RunFloat32SubImm2) { | 3440 TEST(RunFloat32SubImm2) { |
3420 FOR_FLOAT32_INPUTS(i) { | 3441 FOR_FLOAT32_INPUTS(i) { |
3421 BufferedRawMachineAssemblerTester<float> m(kMachFloat32); | 3442 BufferedRawMachineAssemblerTester<float> m(kMachFloat32); |
3422 m.Return(m.Float32Sub(m.Parameter(0), m.Float32Constant(*i))); | 3443 m.Return(m.Float32Sub(m.Parameter(0), m.Float32Constant(*i))); |
3423 | 3444 |
3424 FOR_FLOAT32_INPUTS(j) { CheckFloatEq(*j - *i, m.Call(*j)); } | 3445 FOR_FLOAT32_INPUTS(j) { |
| 3446 volatile float expected = *j - *i; |
| 3447 CheckFloatEq(expected, m.Call(*j)); |
| 3448 } |
3425 } | 3449 } |
3426 } | 3450 } |
3427 | 3451 |
3428 | 3452 |
3429 TEST(RunFloat64SubImm1) { | 3453 TEST(RunFloat64SubImm1) { |
3430 FOR_FLOAT64_INPUTS(i) { | 3454 FOR_FLOAT64_INPUTS(i) { |
3431 BufferedRawMachineAssemblerTester<double> m(kMachFloat64); | 3455 BufferedRawMachineAssemblerTester<double> m(kMachFloat64); |
3432 m.Return(m.Float64Sub(m.Float64Constant(*i), m.Parameter(0))); | 3456 m.Return(m.Float64Sub(m.Float64Constant(*i), m.Parameter(0))); |
3433 | 3457 |
3434 FOR_FLOAT64_INPUTS(j) { CheckFloatEq(*i - *j, m.Call(*j)); } | 3458 FOR_FLOAT64_INPUTS(j) { CheckFloatEq(*i - *j, m.Call(*j)); } |
(...skipping 2036 matching lines...) |
5471 Node* call = r.AddNode(r.common()->Call(desc), phi); | 5495 Node* call = r.AddNode(r.common()->Call(desc), phi); |
5472 r.Return(call); | 5496 r.Return(call); |
5473 | 5497 |
5474 CHECK_EQ(33, r.Call(1)); | 5498 CHECK_EQ(33, r.Call(1)); |
5475 CHECK_EQ(44, r.Call(0)); | 5499 CHECK_EQ(44, r.Call(0)); |
5476 } | 5500 } |
5477 | 5501 |
5478 } // namespace compiler | 5502 } // namespace compiler |
5479 } // namespace internal | 5503 } // namespace internal |
5480 } // namespace v8 | 5504 } // namespace v8 |
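Note on the pattern introduced above (editor's note, not part of the CL): the NEW side stores each expected value in a volatile local before passing it to CheckFloatEq/CheckDoubleEq. A plausible reading, assuming the motivation is x87/GCC builds, is that the volatile store forces the intermediate result out of an 80-bit x87 register and rounds it to genuine float/double precision, so the expectation matches what the generated machine code computes. The sketch below is illustrative only; the function name and sample values are hypothetical and not taken from the CL.

#include <cstdio>

// Sketch: assigning through a volatile local forces a store to memory,
// rounding the sum to 32-bit float precision before it is used.
static float add_via_volatile(float a, float b) {
  volatile float expected = a + b;  // rounded to float precision here
  return expected;
}

int main() {
  float a = 16777216.0f;  // 2^24, the edge of exactly representable floats
  float b = 1.0f;
  // With float rounding the sum stays 16777216.0; an 80-bit intermediate
  // kept in a register could instead carry the extra bit.
  std::printf("%.1f\n", static_cast<double>(add_via_volatile(a, b)));
  return 0;
}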