| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_ARM64_UTILS_ARM64_H_ | 5 #ifndef V8_ARM64_UTILS_ARM64_H_ |
| 6 #define V8_ARM64_UTILS_ARM64_H_ | 6 #define V8_ARM64_UTILS_ARM64_H_ |
| 7 | 7 |
| 8 #include <cmath> | 8 #include <cmath> |
| 9 | 9 |
| 10 #include "src/arm64/constants-arm64.h" | 10 #include "src/arm64/constants-arm64.h" |
| (...skipping 133 matching lines...) | (...skipping 133 matching lines...) |
| 144 // Fused multiply-add. | 144 // Fused multiply-add. |
| 145 inline double FusedMultiplyAdd(double op1, double op2, double a) { | 145 inline double FusedMultiplyAdd(double op1, double op2, double a) { |
| 146 return fma(op1, op2, a); | 146 return fma(op1, op2, a); |
| 147 } | 147 } |
| 148 | 148 |
| 149 | 149 |
| 150 inline float FusedMultiplyAdd(float op1, float op2, float a) { | 150 inline float FusedMultiplyAdd(float op1, float op2, float a) { |
| 151 return fmaf(op1, op2, a); | 151 return fmaf(op1, op2, a); |
| 152 } | 152 } |
| 153 | 153 |
| 154 } } // namespace v8::internal | 154 } // namespace internal |
| | 155 } // namespace v8 |
| 155 | 156 |
| 156 #endif // V8_ARM64_UTILS_ARM64_H_ | 157 #endif // V8_ARM64_UTILS_ARM64_H_ |
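For context on the `FusedMultiplyAdd` helpers shown in the diff: they wrap the standard `fma`/`fmaf` calls, which evaluate `op1 * op2 + a` with a single rounding step (matching the ARM64 FMADD instruction) rather than rounding the product and the sum separately. The following standalone sketch is not part of the V8 change; it only illustrates the single-rounding behavior these wrappers rely on.

```cpp
// Standalone sketch (not part of the V8 diff): std::fma rounds once,
// while a separate multiply followed by an add rounds twice.
#include <cmath>
#include <cstdio>

int main() {
  double x = 134217729.0;        // 2^27 + 1, exactly representable
  double prod = x * x;           // rounded: the exact product 2^54 + 2^28 + 1
                                 // does not fit in a 53-bit mantissa
  double err_fused = std::fma(x, x, -prod);  // exact x*x minus prod: 1.0
  double err_naive = x * x - prod;           // 0.0: the rounding error is lost

  // Prints "fused error: 1, naive error: 0" when the compiler does not
  // contract the naive expression into an fma itself.
  std::printf("fused error: %g, naive error: %g\n", err_fused, err_naive);
  return 0;
}
```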