OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_ARM64_UTILS_ARM64_H_ | 5 #ifndef V8_ARM64_UTILS_ARM64_H_ |
6 #define V8_ARM64_UTILS_ARM64_H_ | 6 #define V8_ARM64_UTILS_ARM64_H_ |
7 | 7 |
8 #include <cmath> | 8 #include <cmath> |
9 #include "src/v8.h" | |
10 | 9 |
11 #include "src/arm64/constants-arm64.h" | 10 #include "src/arm64/constants-arm64.h" |
12 | 11 |
13 #define REGISTER_CODE_LIST(R) \ | 12 #define REGISTER_CODE_LIST(R) \ |
14 R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \ | 13 R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \ |
15 R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \ | 14 R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \ |
16 R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \ | 15 R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \ |
17 R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31) | 16 R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31) |
18 | 17 |
19 namespace v8 { | 18 namespace v8 { |
(...skipping 128 matching lines...)
148 } | 147 } |
149 | 148 |
150 | 149 |
151 inline float FusedMultiplyAdd(float op1, float op2, float a) { | 150 inline float FusedMultiplyAdd(float op1, float op2, float a) { |
152 return fmaf(op1, op2, a); | 151 return fmaf(op1, op2, a); |
153 } | 152 } |
154 | 153 |
155 } } // namespace v8::internal | 154 } } // namespace v8::internal |
156 | 155 |
157 #endif // V8_ARM64_UTILS_ARM64_H_ | 156 #endif // V8_ARM64_UTILS_ARM64_H_ |
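Context on the unchanged part of the header: REGISTER_CODE_LIST is an X-macro that expands a caller-supplied macro R once per ARM64 register code, 0 through 31. The sketch below shows how such a list is typically consumed; the REGISTER_NAME macro and kRegisterNames table are illustrative only and not part of the V8 source.

// Sketch: expanding REGISTER_CODE_LIST with a caller-defined R to build a
// register-name table. REGISTER_NAME and kRegisterNames are hypothetical.
#include <cstdio>

#define REGISTER_CODE_LIST(R)                              \
  R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7)                  \
  R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15)            \
  R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23)          \
  R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)

// Each expansion of REGISTER_NAME(N) emits one string literal: "x0" .. "x31".
#define REGISTER_NAME(N) "x" #N,
static const char* const kRegisterNames[] = {REGISTER_CODE_LIST(REGISTER_NAME)};
#undef REGISTER_NAME

int main() {
  // Prints the 32 generated names.
  for (const char* name : kRegisterNames) std::printf("%s\n", name);
  return 0;
}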
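FusedMultiplyAdd, also visible in the diff, forwards to fmaf from <cmath>, which computes op1 * op2 + a with a single rounding step, matching the single-rounding behaviour of ARM64's FMADD. The self-contained sketch below illustrates why this differs from a separate multiply followed by an add; the input values and the main() driver are illustrative only.

// Sketch: the same helper as in the header, exercised with values that expose
// the difference between one rounding (fused) and two (multiply, then add).
#include <cmath>
#include <cstdio>

inline float FusedMultiplyAdd(float op1, float op2, float a) {
  // std::fmaf rounds only once, after the exact product has been added to a.
  return std::fmaf(op1, op2, a);
}

int main() {
  // Under round-to-nearest, 0.1f * 10.0f rounds to exactly 1.0f, so the
  // two-step form yields 0.0f, while the fused form keeps the exact product
  // and returns the small residual 2^-26 (about 1.49e-8).
  float two_roundings = 0.1f * 10.0f + (-1.0f);
  float one_rounding = FusedMultiplyAdd(0.1f, 10.0f, -1.0f);
  std::printf("%.9g vs %.9g\n", two_roundings, one_rounding);
  return 0;
}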