| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_COMPILER_CODE_GENERATOR_H_ | 5 #ifndef V8_COMPILER_CODE_GENERATOR_H_ |
| 6 #define V8_COMPILER_CODE_GENERATOR_H_ | 6 #define V8_COMPILER_CODE_GENERATOR_H_ |
| 7 | 7 |
| 8 #include "src/compiler/gap-resolver.h" | 8 #include "src/compiler/gap-resolver.h" |
| 9 #include "src/compiler/instruction.h" | 9 #include "src/compiler/instruction.h" |
| 10 #include "src/deoptimizer.h" | 10 #include "src/deoptimizer.h" |
| (...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 83 | 83 |
| 84 // Check if a heap object can be materialized by loading from the frame, which | 84 // Check if a heap object can be materialized by loading from the frame, which |
| 85 // is usually way cheaper than materializing the actual heap object constant. | 85 // is usually way cheaper than materializing the actual heap object constant. |
| 86 bool IsMaterializableFromFrame(Handle<HeapObject> object, int* slot_return); | 86 bool IsMaterializableFromFrame(Handle<HeapObject> object, int* slot_return); |
| 87 // Check if a heap object can be materialized by loading from a heap root, | 87 // Check if a heap object can be materialized by loading from a heap root, |
| 88 // which is cheaper on some platforms than materializing the actual heap | 88 // which is cheaper on some platforms than materializing the actual heap |
| 89 // object constant. | 89 // object constant. |
| 90 bool IsMaterializableFromRoot(Handle<HeapObject> object, | 90 bool IsMaterializableFromRoot(Handle<HeapObject> object, |
| 91 Heap::RootListIndex* index_return); | 91 Heap::RootListIndex* index_return); |
| 92 | 92 |
| 93 enum CodeGenResult { kSuccess, kTooManyDeoptimizationBailouts }; |
| 94 |
| 93 // Assemble instructions for the specified block. | 95 // Assemble instructions for the specified block. |
| 94 void AssembleBlock(const InstructionBlock* block); | 96 CodeGenResult AssembleBlock(const InstructionBlock* block); |
| 95 | 97 |
| 96 // Assemble code for the specified instruction. | 98 // Assemble code for the specified instruction. |
| 97 void AssembleInstruction(Instruction* instr, const InstructionBlock* block); | 99 CodeGenResult AssembleInstruction(Instruction* instr, |
| 100 const InstructionBlock* block); |
| 98 void AssembleSourcePosition(Instruction* instr); | 101 void AssembleSourcePosition(Instruction* instr); |
| 99 void AssembleGaps(Instruction* instr); | 102 void AssembleGaps(Instruction* instr); |
| 100 | 103 |
| 101 // =========================================================================== | 104 // =========================================================================== |
| 102 // ============= Architecture-specific code generation methods. ============== | 105 // ============= Architecture-specific code generation methods. ============== |
| 103 // =========================================================================== | 106 // =========================================================================== |
| 104 | 107 |
| 105 void AssembleArchInstruction(Instruction* instr); | 108 CodeGenResult AssembleArchInstruction(Instruction* instr); |
| 106 void AssembleArchJump(RpoNumber target); | 109 void AssembleArchJump(RpoNumber target); |
| 107 void AssembleArchBranch(Instruction* instr, BranchInfo* branch); | 110 void AssembleArchBranch(Instruction* instr, BranchInfo* branch); |
| 108 void AssembleArchBoolean(Instruction* instr, FlagsCondition condition); | 111 void AssembleArchBoolean(Instruction* instr, FlagsCondition condition); |
| 109 void AssembleArchLookupSwitch(Instruction* instr); | 112 void AssembleArchLookupSwitch(Instruction* instr); |
| 110 void AssembleArchTableSwitch(Instruction* instr); | 113 void AssembleArchTableSwitch(Instruction* instr); |
| 111 | 114 |
| 112 void AssembleDeoptimizerCall(int deoptimization_id, | 115 CodeGenResult AssembleDeoptimizerCall(int deoptimization_id, |
| 113 Deoptimizer::BailoutType bailout_type); | 116 Deoptimizer::BailoutType bailout_type); |
| 114 | 117 |
| 115 // Generates an architecture-specific, descriptor-specific prologue | 118 // Generates an architecture-specific, descriptor-specific prologue |
| 116 // to set up a stack frame. | 119 // to set up a stack frame. |
| 117 void AssembleConstructFrame(); | 120 void AssembleConstructFrame(); |
| 118 | 121 |
| 119 // Generates an architecture-specific, descriptor-specific return sequence | 122 // Generates an architecture-specific, descriptor-specific return sequence |
| 120 // to tear down a stack frame. | 123 // to tear down a stack frame. |
| 121 void AssembleReturn(); | 124 void AssembleReturn(); |
| 122 | 125 |
| 123 // Generates code to deconstruct the caller's frame, including arguments. | 126 // Generates code to deconstruct the caller's frame, including arguments. |
| (...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 239 JumpTable* jump_tables_; | 242 JumpTable* jump_tables_; |
| 240 OutOfLineCode* ools_; | 243 OutOfLineCode* ools_; |
| 241 int osr_pc_offset_; | 244 int osr_pc_offset_; |
| 242 }; | 245 }; |
| 243 | 246 |
| 244 } // namespace compiler | 247 } // namespace compiler |
| 245 } // namespace internal | 248 } // namespace internal |
| 246 } // namespace v8 | 249 } // namespace v8 |
| 247 | 250 |
| 248 #endif  // V8_COMPILER_CODE_GENERATOR_H_ | 251 #endif  // V8_COMPILER_CODE_GENERATOR_H_ |
| OLD | NEW |