OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_ARM64_UTILS_ARM64_H_ | 5 #ifndef V8_ARM64_UTILS_ARM64_H_ |
6 #define V8_ARM64_UTILS_ARM64_H_ | 6 #define V8_ARM64_UTILS_ARM64_H_ |
7 | 7 |
8 #include <cmath> | 8 #include <cmath> |
9 #include "v8.h" | 9 #include "src/v8.h" |
10 #include "arm64/constants-arm64.h" | 10 #include "src/arm64/constants-arm64.h" |
11 | 11 |
12 #define REGISTER_CODE_LIST(R) \ | 12 #define REGISTER_CODE_LIST(R) \ |
13 R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \ | 13 R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \ |
14 R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \ | 14 R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \ |
15 R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \ | 15 R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \ |
16 R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31) | 16 R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31) |
17 | 17 |
18 namespace v8 { | 18 namespace v8 { |
19 namespace internal { | 19 namespace internal { |
20 | 20 |
(...skipping 82 matching lines...)
103 } | 103 } |
104 | 104 |
105 | 105 |
106 inline float FusedMultiplyAdd(float op1, float op2, float a) { | 106 inline float FusedMultiplyAdd(float op1, float op2, float a) { |
107 return fmaf(op1, op2, a); | 107 return fmaf(op1, op2, a); |
108 } | 108 } |
109 | 109 |
110 } } // namespace v8::internal | 110 } } // namespace v8::internal |
111 | 111 |
112 #endif // V8_ARM64_UTILS_ARM64_H_ | 112 #endif // V8_ARM64_UTILS_ARM64_H_ |
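
For context, here is a minimal standalone sketch of what the FusedMultiplyAdd helper visible in this hunk does. It is not part of the CL: the wrapper is copied from the hunk, while the driver in main and the sample operands are illustrative assumptions added here. The helper forwards to the C99/C++11 fmaf, so the multiply and add are performed with a single rounding step, which is what the ARM64 FMADD/FMSUB simulation relies on.

// Illustrative sketch only; mirrors the FusedMultiplyAdd wrapper above.
#include <cmath>
#include <cstdio>

namespace {

// Same shape as the helper in the hunk; std::fmaf computes
// op1 * op2 + a with one rounding at the end.
inline float FusedMultiplyAdd(float op1, float op2, float a) {
  return std::fmaf(op1, op2, a);
}

}  // namespace

int main() {
  // The fused result can differ from the separately rounded expression,
  // because the intermediate product is not rounded to float first.
  float fused = FusedMultiplyAdd(0.1f, 10.0f, -1.0f);
  float separate = 0.1f * 10.0f + -1.0f;
  std::printf("fused=%.9g separate=%.9g\n", fused, separate);
  return 0;
}

With these sample operands the separately rounded expression collapses to 0, while the fused form keeps the small residue of 0.1f * 10.0f - 1.0f, which is why the simulator goes through fmaf rather than writing the expression out directly.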