| OLD | NEW |
| 1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_CODE_STUB_ASSEMBLER_H_ | 5 #ifndef V8_CODE_STUB_ASSEMBLER_H_ |
| 6 #define V8_CODE_STUB_ASSEMBLER_H_ | 6 #define V8_CODE_STUB_ASSEMBLER_H_ |
| 7 | 7 |
| 8 #include <functional> | 8 #include <functional> |
| 9 | 9 |
| 10 #include "src/compiler/code-assembler.h" | 10 #include "src/compiler/code-assembler.h" |
| (...skipping 122 matching lines...) |
| 133 compiler::Node* SmiMax(compiler::Node* a, compiler::Node* b); | 133 compiler::Node* SmiMax(compiler::Node* a, compiler::Node* b); |
| 134 compiler::Node* SmiMin(compiler::Node* a, compiler::Node* b); | 134 compiler::Node* SmiMin(compiler::Node* a, compiler::Node* b); |
| 135 // Computes a % b for Smi inputs a and b; result is not necessarily a Smi. | 135 // Computes a % b for Smi inputs a and b; result is not necessarily a Smi. |
| 136 compiler::Node* SmiMod(compiler::Node* a, compiler::Node* b); | 136 compiler::Node* SmiMod(compiler::Node* a, compiler::Node* b); |
| 137 // Computes a * b for Smi inputs a and b; result is not necessarily a Smi. | 137 // Computes a * b for Smi inputs a and b; result is not necessarily a Smi. |
| 138 compiler::Node* SmiMul(compiler::Node* a, compiler::Node* b); | 138 compiler::Node* SmiMul(compiler::Node* a, compiler::Node* b); |
| 139 compiler::Node* SmiOr(compiler::Node* a, compiler::Node* b) { | 139 compiler::Node* SmiOr(compiler::Node* a, compiler::Node* b) { |
| 140 return WordOr(a, b); | 140 return WordOr(a, b); |
| 141 } | 141 } |
| 142 | 142 |
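Note on the SmiMod/SmiMul comments above: because the remainder or product of two Smis can fall outside Smi range, callers must treat the result as a generic Number. A minimal sketch of such a caller (hypothetical helper, not part of this patch):

    // |smi| is assumed to be a Smi. SmiMul may return a HeapNumber on
    // overflow (or for -0), so the result cannot be fed directly into
    // Smi-only operations without a Smi check first.
    compiler::Node* SquareSmi(CodeStubAssembler* assembler,
                              compiler::Node* smi) {
      return assembler->SmiMul(smi, smi);  // Smi or HeapNumber
    }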
| 143 // Smi | HeapNumber operations. |
| 144 compiler::Node* NumberInc(compiler::Node* value); |
| 145 |
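The newly added NumberInc presumably increments a value that may be either a Smi or a HeapNumber, yielding a Number in turn (the increment itself can overflow Smi range). A hedged usage sketch:

    // |counter| may be a Smi or a HeapNumber; NumberInc is assumed to
    // handle both representations, so no Smi check is needed here.
    compiler::Node* BumpCounter(CodeStubAssembler* assembler,
                                compiler::Node* counter) {
      return assembler->NumberInc(counter);
    }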
| 143 // Allocate an object of the given size. | 146 // Allocate an object of the given size. |
| 144 compiler::Node* Allocate(compiler::Node* size, AllocationFlags flags = kNone); | 147 compiler::Node* Allocate(compiler::Node* size, AllocationFlags flags = kNone); |
| 145 compiler::Node* Allocate(int size, AllocationFlags flags = kNone); | 148 compiler::Node* Allocate(int size, AllocationFlags flags = kNone); |
| 146 compiler::Node* InnerAllocate(compiler::Node* previous, int offset); | 149 compiler::Node* InnerAllocate(compiler::Node* previous, int offset); |
| 147 compiler::Node* InnerAllocate(compiler::Node* previous, | 150 compiler::Node* InnerAllocate(compiler::Node* previous, |
| 148 compiler::Node* offset); | 151 compiler::Node* offset); |
| 149 | 152 |
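Allocate and InnerAllocate are typically paired: one chunk is allocated, and a second object is carved out at a fixed offset into it. A sketch under assumed sizes (both constants are hypothetical):

    // Allocate space for two adjacent objects in one go, then derive a
    // pointer to the second one. InnerAllocate only offsets into the
    // chunk; it performs no allocation of its own.
    compiler::Node* AllocatePair(CodeStubAssembler* assembler) {
      const int kFirstSize = 16;   // assumed size of the first object
      const int kSecondSize = 16;  // assumed size of the second object
      compiler::Node* first = assembler->Allocate(kFirstSize + kSecondSize);
      return assembler->InnerAllocate(first, kFirstSize);
    }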
| 150 void Assert(compiler::Node* condition); | 153 void Assert(compiler::Node* condition); |
| 151 | 154 |
| 152 // Check a value for smi-ness | 155 // Check a value for smi-ness |
| (...skipping 135 matching lines...) |
| 288 int additional_offset = 0, | 291 int additional_offset = 0, |
| 289 ParameterMode parameter_mode = INTEGER_PARAMETERS, | 292 ParameterMode parameter_mode = INTEGER_PARAMETERS, |
| 290 Label* if_hole = nullptr); | 293 Label* if_hole = nullptr); |
| 291 | 294 |
| 292 // Load Float64 value by |base| + |offset| address. If the value is a double | 295 // Load Float64 value by |base| + |offset| address. If the value is a double |
| 293 // hole then jump to |if_hole|. If |machine_type| is None then only the hole | 296 // hole then jump to |if_hole|. If |machine_type| is None then only the hole |
| 294 // check is generated. | 297 // check is generated. |
| 295 compiler::Node* LoadDoubleWithHoleCheck( | 298 compiler::Node* LoadDoubleWithHoleCheck( |
| 296 compiler::Node* base, compiler::Node* offset, Label* if_hole, | 299 compiler::Node* base, compiler::Node* offset, Label* if_hole, |
| 297 MachineType machine_type = MachineType::Float64()); | 300 MachineType machine_type = MachineType::Float64()); |
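A sketch of the hole-check contract above: the load either yields the Float64 value or jumps to |if_hole|, and passing MachineType::None() emits only the hole check. Hypothetical wrapper:

    // |base| + |offset| is assumed to address a FixedDoubleArray slot.
    // Control transfers to |if_hole| when the slot holds the hole NaN.
    compiler::Node* LoadElementOrBail(CodeStubAssembler* assembler,
                                      compiler::Node* base,
                                      compiler::Node* offset,
                                      CodeStubAssembler::Label* if_hole) {
      return assembler->LoadDoubleWithHoleCheck(base, offset, if_hole);
    }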
| 301 compiler::Node* LoadFixedTypedArrayElement( |
| 302 compiler::Node* data_pointer, compiler::Node* index_node, |
| 303 ElementsKind elements_kind, |
| 304 ParameterMode parameter_mode = INTEGER_PARAMETERS); |
| 298 | 305 |
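For the new LoadFixedTypedArrayElement, a hedged example that loads from an assumed Float32 backing store; with the default INTEGER_PARAMETERS mode, |index| is an untagged integer rather than a Smi:

    // |data_pointer| is assumed to be the backing-store pointer of a
    // JSTypedArray; the elements kind selects the machine representation.
    compiler::Node* LoadFloat32Element(CodeStubAssembler* assembler,
                                       compiler::Node* data_pointer,
                                       compiler::Node* index) {
      return assembler->LoadFixedTypedArrayElement(data_pointer, index,
                                                   FLOAT32_ELEMENTS);
    }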
| 299 // Context manipulation | 306 // Context manipulation |
| 300 compiler::Node* LoadContextElement(compiler::Node* context, int slot_index); | 307 compiler::Node* LoadContextElement(compiler::Node* context, int slot_index); |
| 301 compiler::Node* StoreContextElement(compiler::Node* context, int slot_index, | 308 compiler::Node* StoreContextElement(compiler::Node* context, int slot_index, |
| 302 compiler::Node* value); | 309 compiler::Node* value); |
| 303 compiler::Node* LoadNativeContext(compiler::Node* context); | 310 compiler::Node* LoadNativeContext(compiler::Node* context); |
| 304 | 311 |
| 305 compiler::Node* LoadJSArrayElementsMap(ElementsKind kind, | 312 compiler::Node* LoadJSArrayElementsMap(ElementsKind kind, |
| 306 compiler::Node* native_context); | 313 compiler::Node* native_context); |
| 307 | 314 |
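LoadJSArrayElementsMap takes the native context, so it composes with LoadNativeContext above. A sketch assuming a FAST_ELEMENTS array:

    // From an arbitrary context, fetch the map used for fast-mode
    // JSArrays of the given elements kind.
    compiler::Node* FastArrayMap(CodeStubAssembler* assembler,
                                 compiler::Node* context) {
      compiler::Node* native_context = assembler->LoadNativeContext(context);
      return assembler->LoadJSArrayElementsMap(FAST_ELEMENTS, native_context);
    }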
| (...skipping 650 matching lines...) |
| 958 Label* bailout); | 965 Label* bailout); |
| 959 | 966 |
| 960 static const int kElementLoopUnrollThreshold = 8; | 967 static const int kElementLoopUnrollThreshold = 8; |
| 961 }; | 968 }; |
| 962 | 969 |
| 963 DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags); | 970 DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags); |
| 964 | 971 |
| 965 } // namespace internal | 972 } // namespace internal |
| 966 } // namespace v8 | 973 } // namespace v8 |
| 967 #endif // V8_CODE_STUB_ASSEMBLER_H_ | 974 #endif // V8_CODE_STUB_ASSEMBLER_H_ |