| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. | 11 // with the distribution. |
| 12 // * Neither the name of Google Inc. nor the names of its | 12 // * Neither the name of Google Inc. nor the names of its |
| 13 // contributors may be used to endorse or promote products derived | 13 // contributors may be used to endorse or promote products derived |
| 14 // from this software without specific prior written permission. | 14 // from this software without specific prior written permission. |
| 15 // | 15 // |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #ifndef V8_ARM_LITHIUM_CODEGEN_ARM_H_ | 28 #ifndef V8_A64_LITHIUM_CODEGEN_A64_H_ |
| 29 #define V8_ARM_LITHIUM_CODEGEN_ARM_H_ | 29 #define V8_A64_LITHIUM_CODEGEN_A64_H_ |
| 30 | 30 |
| 31 #include "arm/lithium-arm.h" | 31 #include "a64/lithium-a64.h" |
| 32 | 32 |
| 33 #include "arm/lithium-gap-resolver-arm.h" | 33 #include "a64/lithium-gap-resolver-a64.h" |
| 34 #include "deoptimizer.h" | 34 #include "deoptimizer.h" |
| 35 #include "lithium-codegen.h" | 35 #include "lithium-codegen.h" |
| 36 #include "safepoint-table.h" | 36 #include "safepoint-table.h" |
| 37 #include "scopes.h" | 37 #include "scopes.h" |
| 38 #include "v8utils.h" | 38 #include "v8utils.h" |
| 39 | 39 |
| 40 namespace v8 { | 40 namespace v8 { |
| 41 namespace internal { | 41 namespace internal { |
| 42 | 42 |
| 43 // Forward declarations. | 43 // Forward declarations. |
| 44 class LDeferredCode; | 44 class LDeferredCode; |
| 45 class SafepointGenerator; | 45 class SafepointGenerator; |
| 46 class BranchGenerator; |
| 46 | 47 |
| 47 class LCodeGen: public LCodeGenBase { | 48 class LCodeGen: public LCodeGenBase { |
| 48 public: | 49 public: |
| 49 LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) | 50 LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) |
| 50 : LCodeGenBase(chunk, assembler, info), | 51 : LCodeGenBase(chunk, assembler, info), |
| 51 deoptimizations_(4, info->zone()), | 52 deoptimizations_(4, info->zone()), |
| 52 deopt_jump_table_(4, info->zone()), | 53 deopt_jump_table_(4, info->zone()), |
| 53 deoptimization_literals_(8, info->zone()), | 54 deoptimization_literals_(8, info->zone()), |
| 54 inlined_function_count_(0), | 55 inlined_function_count_(0), |
| 55 scope_(info->scope()), | 56 scope_(info->scope()), |
| 56 translations_(info->zone()), | 57 translations_(info->zone()), |
| 57 deferred_(8, info->zone()), | 58 deferred_(8, info->zone()), |
| 58 osr_pc_offset_(-1), | 59 osr_pc_offset_(-1), |
| 59 frame_is_built_(false), | 60 frame_is_built_(false), |
| 60 safepoints_(info->zone()), | 61 safepoints_(info->zone()), |
| 61 resolver_(this), | 62 resolver_(this), |
| 62 expected_safepoint_kind_(Safepoint::kSimple) { | 63 expected_safepoint_kind_(Safepoint::kSimple) { |
| 63 PopulateDeoptimizationLiteralsWithInlinedFunctions(); | 64 PopulateDeoptimizationLiteralsWithInlinedFunctions(); |
| 64 } | 65 } |
| 65 | 66 |
| 67 // Simple accessors. |
| 68 Scope* scope() const { return scope_; } |
| 66 | 69 |
| 67 int LookupDestination(int block_id) const { | 70 int LookupDestination(int block_id) const { |
| 68 return chunk()->LookupDestination(block_id); | 71 return chunk()->LookupDestination(block_id); |
| 69 } | 72 } |
| 70 | 73 |
| 71 bool IsNextEmittedBlock(int block_id) const { | 74 bool IsNextEmittedBlock(int block_id) const { |
| 72 return LookupDestination(block_id) == GetNextEmittedBlock(); | 75 return LookupDestination(block_id) == GetNextEmittedBlock(); |
| 73 } | 76 } |
| 74 | 77 |
| 75 bool NeedsEagerFrame() const { | 78 bool NeedsEagerFrame() const { |
| 76 return GetStackSlotCount() > 0 || | 79 return GetStackSlotCount() > 0 || |
| 77 info()->is_non_deferred_calling() || | 80 info()->is_non_deferred_calling() || |
| 78 !info()->IsStub() || | 81 !info()->IsStub() || |
| 79 info()->requires_frame(); | 82 info()->requires_frame(); |
| 80 } | 83 } |
| 81 bool NeedsDeferredFrame() const { | 84 bool NeedsDeferredFrame() const { |
| 82 return !NeedsEagerFrame() && info()->is_deferred_calling(); | 85 return !NeedsEagerFrame() && info()->is_deferred_calling(); |
| 83 } | 86 } |
| 84 | 87 |
| 85 LinkRegisterStatus GetLinkRegisterState() const { | 88 LinkRegisterStatus GetLinkRegisterState() const { |
| 86 return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved; | 89 return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved; |
| 87 } | 90 } |
| 88 | 91 |
| 89 // Support for converting LOperands to assembler types. | |
| 90 // LOperand must be a register. | |
| 91 Register ToRegister(LOperand* op) const; | |
| 92 | |
| 93 // LOperand is loaded into scratch, unless already a register. | |
| 94 Register EmitLoadRegister(LOperand* op, Register scratch); | |
| 95 | |
| 96 // LOperand must be a double register. | |
| 97 DwVfpRegister ToDoubleRegister(LOperand* op) const; | |
| 98 | |
| 99 // LOperand is loaded into dbl_scratch, unless already a double register. | |
| 100 DwVfpRegister EmitLoadDoubleRegister(LOperand* op, | |
| 101 SwVfpRegister flt_scratch, | |
| 102 DwVfpRegister dbl_scratch); | |
| 103 int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const; | |
| 104 int32_t ToInteger32(LConstantOperand* op) const; | |
| 105 Smi* ToSmi(LConstantOperand* op) const; | |
| 106 double ToDouble(LConstantOperand* op) const; | |
| 107 Operand ToOperand(LOperand* op); | |
| 108 MemOperand ToMemOperand(LOperand* op) const; | |
| 109 // Returns a MemOperand pointing to the high word of a DoubleStackSlot. | |
| 110 MemOperand ToHighMemOperand(LOperand* op) const; | |
| 111 | |
| 112 bool IsInteger32(LConstantOperand* op) const; | |
| 113 bool IsSmi(LConstantOperand* op) const; | |
| 114 Handle<Object> ToHandle(LConstantOperand* op) const; | |
| 115 | |
| 116 // Try to generate code for the entire chunk, but it may fail if the | 92 // Try to generate code for the entire chunk, but it may fail if the |
| 117 // chunk contains constructs we cannot handle. Returns true if the | 93 // chunk contains constructs we cannot handle. Returns true if the |
| 118 // code generation attempt succeeded. | 94 // code generation attempt succeeded. |
| 119 bool GenerateCode(); | 95 bool GenerateCode(); |
| 120 | 96 |
| 121 // Finish the code by setting stack height, safepoint, and bailout | 97 // Finish the code by setting stack height, safepoint, and bailout |
| 122 // information on it. | 98 // information on it. |
| 123 void FinishCode(Handle<Code> code); | 99 void FinishCode(Handle<Code> code); |
| 124 | 100 |
| 125 // Deferred code support. | 101 // Support for converting LOperands to assembler types. |
| 126 void DoDeferredNumberTagD(LNumberTagD* instr); | 102 // LOperand must be a register. |
| 103 Register ToRegister(LOperand* op) const; |
| 104 Register ToRegister32(LOperand* op) const; |
| 105 Operand ToOperand(LOperand* op); |
| 106 Operand ToOperand32I(LOperand* op); |
| 107 Operand ToOperand32U(LOperand* op); |
| 108 MemOperand ToMemOperand(LOperand* op) const; |
| 109 Handle<Object> ToHandle(LConstantOperand* op) const; |
| 127 | 110 |
| 128 enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 }; | 111 // TODO(jbramley): Examine these helpers and check that they make sense. |
| 129 void DoDeferredNumberTagI(LInstruction* instr, | 112 // IsInteger32Constant returns true for smi constants, for example. |
| 130 LOperand* value, | 113 bool IsInteger32Constant(LConstantOperand* op) const; |
| 131 IntegerSignedness signedness); | 114 bool IsSmi(LConstantOperand* op) const; |
| 132 | 115 |
| 133 void DoDeferredTaggedToI(LTaggedToI* instr); | 116 int32_t ToInteger32(LConstantOperand* op) const; |
| 134 void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr); | 117 Smi* ToSmi(LConstantOperand* op) const; |
| 135 void DoDeferredStackCheck(LStackCheck* instr); | 118 double ToDouble(LConstantOperand* op) const; |
| 136 void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); | 119 DoubleRegister ToDoubleRegister(LOperand* op) const; |
| 137 void DoDeferredStringCharFromCode(LStringCharFromCode* instr); | |
| 138 void DoDeferredAllocate(LAllocate* instr); | |
| 139 void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, | |
| 140 Label* map_check); | |
| 141 void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); | |
| 142 | |
| 143 // Parallel move support. | |
| 144 void DoParallelMove(LParallelMove* move); | |
| 145 void DoGap(LGap* instr); | |
| 146 | |
| 147 MemOperand PrepareKeyedOperand(Register key, | |
| 148 Register base, | |
| 149 bool key_is_constant, | |
| 150 int constant_key, | |
| 151 int element_size, | |
| 152 int shift_size, | |
| 153 int additional_index, | |
| 154 int additional_offset); | |
| 155 | |
| 156 // Emit frame translation commands for an environment. | |
| 157 void WriteTranslation(LEnvironment* environment, Translation* translation); | |
| 158 | 120 |
| 159 // Declare methods that deal with the individual node types. | 121 // Declare methods that deal with the individual node types. |
| 160 #define DECLARE_DO(type) void Do##type(L##type* node); | 122 #define DECLARE_DO(type) void Do##type(L##type* node); |
| 161 LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) | 123 LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) |
| 162 #undef DECLARE_DO | 124 #undef DECLARE_DO |
| 163 | 125 |
| 164 private: | 126 private: |
| 165 StrictModeFlag strict_mode_flag() const { | 127 // Return a double scratch register which can be used locally |
| 166 return info()->is_classic_mode() ? kNonStrictMode : kStrictMode; | 128 // when generating code for a lithium instruction. |
| 167 } | 129 DoubleRegister double_scratch() { return crankshaft_fp_scratch; } |
| 168 | 130 |
| 169 Scope* scope() const { return scope_; } | 131 // Deferred code support. |
| 132 void DoDeferredNumberTagD(LNumberTagD* instr); |
| 133 void DoDeferredStackCheck(LStackCheck* instr); |
| 134 void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); |
| 135 void DoDeferredStringCharFromCode(LStringCharFromCode* instr); |
| 136 void DoDeferredMathAbsTagged(LMathAbsTagged* instr, |
| 137 Label* exit, |
| 138 Label* allocation_entry); |
| 170 | 139 |
| 171 Register scratch0() { return r9; } | 140 enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 }; |
| 172 LowDwVfpRegister double_scratch0() { return kScratchDoubleReg; } | 141 void DoDeferredNumberTagU(LInstruction* instr, |
| 142 LOperand* value, |
| 143 LOperand* temp1, |
| 144 LOperand* temp2); |
| 145 void DoDeferredTaggedToI(LTaggedToI* instr, |
| 146 LOperand* value, |
| 147 LOperand* temp1, |
| 148 LOperand* temp2); |
| 149 void DoDeferredAllocate(LAllocate* instr); |
| 150 void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr); |
| 151 void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); |
| 173 | 152 |
| 174 LInstruction* GetNextInstruction(); | 153 Operand ToOperand32(LOperand* op, IntegerSignedness signedness); |
| 175 | 154 |
| 176 void EmitClassOfTest(Label* if_true, | 155 static Condition TokenToCondition(Token::Value op, bool is_unsigned); |
| 177 Label* if_false, | 156 void EmitGoto(int block); |
| 178 Handle<String> class_name, | 157 void DoGap(LGap* instr); |
| 179 Register input, | 158 |
| 180 Register temporary, | 159 // Generic version of EmitBranch. It contains some code to avoid emitting a |
| 181 Register temporary2); | 160 // branch on the next emitted basic block where we could just fall-through. |
| 161 // You shouldn't use that directly but rather consider one of the helper like |
| 162 // LCodeGen::EmitBranch, LCodeGen::EmitCompareAndBranch... |
| 163 template<class InstrType> |
| 164 void EmitBranchGeneric(InstrType instr, |
| 165 const BranchGenerator& branch); |
| 166 |
| 167 template<class InstrType> |
| 168 void EmitBranch(InstrType instr, Condition condition); |
| 169 |
| 170 template<class InstrType> |
| 171 void EmitCompareAndBranch(InstrType instr, |
| 172 Condition condition, |
| 173 const Register& lhs, |
| 174 const Operand& rhs); |
| 175 |
| 176 template<class InstrType> |
| 177 void EmitTestAndBranch(InstrType instr, |
| 178 Condition condition, |
| 179 const Register& value, |
| 180 uint64_t mask); |
| 181 |
| 182 template<class InstrType> |
| 183 void EmitBranchIfNonZeroNumber(InstrType instr, |
| 184 const FPRegister& value, |
| 185 const FPRegister& scratch); |
| 186 |
| 187 template<class InstrType> |
| 188 void EmitBranchIfHeapNumber(InstrType instr, |
| 189 const Register& value); |
| 190 |
| 191 template<class InstrType> |
| 192 void EmitBranchIfRoot(InstrType instr, |
| 193 const Register& value, |
| 194 Heap::RootListIndex index); |
| 195 |
| 196 // Emits optimized code to deep-copy the contents of statically known object |
| 197 // graphs (e.g. object literal boilerplate). Expects a pointer to the |
| 198 // allocated destination object in the result register, and a pointer to the |
| 199 // source object in the source register. |
| 200 void EmitDeepCopy(Handle<JSObject> object, |
| 201 Register result, |
| 202 Register source, |
| 203 Register scratch, |
| 204 int* offset, |
| 205 AllocationSiteMode mode); |
| 206 |
| 207 // Emits optimized code for %_IsString(x). Preserves input register. |
| 208 // Returns the condition on which a final split to |
| 209 // true and false label should be made, to optimize fallthrough. |
| 210 Condition EmitIsString(Register input, Register temp1, Label* is_not_string, |
| 211 SmiCheck check_needed); |
| 212 |
| 213 int DefineDeoptimizationLiteral(Handle<Object> literal); |
| 214 void PopulateDeoptimizationData(Handle<Code> code); |
| 215 void PopulateDeoptimizationLiteralsWithInlinedFunctions(); |
| 216 |
| 217 MemOperand BuildSeqStringOperand(Register string, |
| 218 Register temp, |
| 219 LOperand* index, |
| 220 String::Encoding encoding); |
| 221 Deoptimizer::BailoutType DeoptimizeHeader( |
| 222 LEnvironment* environment, |
| 223 Deoptimizer::BailoutType* override_bailout_type); |
| 224 void Deoptimize(LEnvironment* environment); |
| 225 void Deoptimize(LEnvironment* environment, |
| 226 Deoptimizer::BailoutType bailout_type); |
| 227 void DeoptimizeIf(Condition cc, LEnvironment* environment); |
| 228 void DeoptimizeIfZero(Register rt, LEnvironment* environment); |
| 229 void DeoptimizeIfNegative(Register rt, LEnvironment* environment); |
| 230 void DeoptimizeIfSmi(Register rt, LEnvironment* environment); |
| 231 void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment); |
| 232 void DeoptimizeIfRoot(Register rt, |
| 233 Heap::RootListIndex index, |
| 234 LEnvironment* environment); |
| 235 void DeoptimizeIfNotRoot(Register rt, |
| 236 Heap::RootListIndex index, |
| 237 LEnvironment* environment); |
| 238 void ApplyCheckIf(Condition cc, LBoundsCheck* check); |
| 239 |
| 240 MemOperand PrepareKeyedExternalArrayOperand(Register key, |
| 241 Register base, |
| 242 Register scratch, |
| 243 bool key_is_smi, |
| 244 bool key_is_constant, |
| 245 int constant_key, |
| 246 ElementsKind elements_kind, |
| 247 int additional_index); |
| 248 void CalcKeyedArrayBaseRegister(Register base, |
| 249 Register elements, |
| 250 Register key, |
| 251 bool key_is_tagged, |
| 252 ElementsKind elements_kind); |
| 253 |
| 254 void RegisterEnvironmentForDeoptimization(LEnvironment* environment, |
| 255 Safepoint::DeoptMode mode); |
| 182 | 256 |
| 183 int GetStackSlotCount() const { return chunk()->spill_slot_count(); } | 257 int GetStackSlotCount() const { return chunk()->spill_slot_count(); } |
| 184 | 258 |
| 185 void Abort(BailoutReason reason); | 259 void Abort(BailoutReason reason); |
| 186 | 260 |
| 187 void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } | 261 void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } |
| 188 | 262 |
| 263 // Emit frame translation commands for an environment. |
| 264 void WriteTranslation(LEnvironment* environment, Translation* translation); |
| 265 |
| 266 void AddToTranslation(LEnvironment* environment, |
| 267 Translation* translation, |
| 268 LOperand* op, |
| 269 bool is_tagged, |
| 270 bool is_uint32, |
| 271 int* object_index_pointer, |
| 272 int* dematerialized_index_pointer); |
| 273 |
| 189 void SaveCallerDoubles(); | 274 void SaveCallerDoubles(); |
| 190 void RestoreCallerDoubles(); | 275 void RestoreCallerDoubles(); |
| 191 | 276 |
| 192 // Code generation passes. Returns true if code generation should | 277 // Code generation steps. Returns true if code generation should continue. |
| 193 // continue. | |
| 194 bool GeneratePrologue(); | 278 bool GeneratePrologue(); |
| 195 bool GenerateDeferredCode(); | 279 bool GenerateDeferredCode(); |
| 196 bool GenerateDeoptJumpTable(); | 280 bool GenerateDeoptJumpTable(); |
| 197 bool GenerateSafepointTable(); | 281 bool GenerateSafepointTable(); |
| 198 | 282 |
| 199 // Generates the custom OSR entrypoint and sets the osr_pc_offset. | 283 // Generates the custom OSR entrypoint and sets the osr_pc_offset. |
| 200 void GenerateOsrPrologue(); | 284 void GenerateOsrPrologue(); |
| 201 | 285 |
| 202 enum SafepointMode { | 286 enum SafepointMode { |
| 203 RECORD_SIMPLE_SAFEPOINT, | 287 RECORD_SIMPLE_SAFEPOINT, |
| 204 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS | 288 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS |
| 205 }; | 289 }; |
| 206 | 290 |
| 207 void CallCode( | 291 void CallCode(Handle<Code> code, |
| 208 Handle<Code> code, | 292 RelocInfo::Mode mode, |
| 209 RelocInfo::Mode mode, | 293 LInstruction* instr); |
| 210 LInstruction* instr, | |
| 211 TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS); | |
| 212 | 294 |
| 213 void CallCodeGeneric( | 295 void CallCodeGeneric(Handle<Code> code, |
| 214 Handle<Code> code, | 296 RelocInfo::Mode mode, |
| 215 RelocInfo::Mode mode, | 297 LInstruction* instr, |
| 216 LInstruction* instr, | 298 SafepointMode safepoint_mode); |
| 217 SafepointMode safepoint_mode, | |
| 218 TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS); | |
| 219 | 299 |
| 220 void CallRuntime(const Runtime::Function* function, | 300 void CallRuntime(const Runtime::Function* function, |
| 221 int num_arguments, | 301 int num_arguments, |
| 222 LInstruction* instr, | 302 LInstruction* instr, |
| 223 SaveFPRegsMode save_doubles = kDontSaveFPRegs); | 303 SaveFPRegsMode save_doubles = kDontSaveFPRegs); |
| 224 | 304 |
| 225 void CallRuntime(Runtime::FunctionId id, | 305 void CallRuntime(Runtime::FunctionId id, |
| 226 int num_arguments, | 306 int num_arguments, |
| 227 LInstruction* instr) { | 307 LInstruction* instr) { |
| 228 const Runtime::Function* function = Runtime::FunctionForId(id); | 308 const Runtime::Function* function = Runtime::FunctionForId(id); |
| 229 CallRuntime(function, num_arguments, instr); | 309 CallRuntime(function, num_arguments, instr); |
| 230 } | 310 } |
| 231 | 311 |
| 232 void LoadContextFromDeferred(LOperand* context); | 312 void LoadContextFromDeferred(LOperand* context); |
| 233 void CallRuntimeFromDeferred(Runtime::FunctionId id, | 313 void CallRuntimeFromDeferred(Runtime::FunctionId id, |
| 234 int argc, | 314 int argc, |
| 235 LInstruction* instr, | 315 LInstruction* instr, |
| 236 LOperand* context); | 316 LOperand* context); |
| 237 | 317 |
| 238 enum R1State { | 318 // Generate a direct call to a known function. |
| 239 R1_UNINITIALIZED, | 319 // If the function is already loaded into x1 by the caller, function_reg may |
| 240 R1_CONTAINS_TARGET | 320 // be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will |
| 241 }; | 321 // automatically load it. |
| 242 | |
| 243 // Generate a direct call to a known function. Expects the function | |
| 244 // to be in r1. | |
| 245 void CallKnownFunction(Handle<JSFunction> function, | 322 void CallKnownFunction(Handle<JSFunction> function, |
| 246 int formal_parameter_count, | 323 int formal_parameter_count, |
| 247 int arity, | 324 int arity, |
| 248 LInstruction* instr, | 325 LInstruction* instr, |
| 249 R1State r1_state); | 326 Register function_reg = NoReg); |
| 250 | |
| 251 void RecordSafepointWithLazyDeopt(LInstruction* instr, | |
| 252 SafepointMode safepoint_mode); | |
| 253 | |
| 254 void RegisterEnvironmentForDeoptimization(LEnvironment* environment, | |
| 255 Safepoint::DeoptMode mode); | |
| 256 void DeoptimizeIf(Condition condition, | |
| 257 LEnvironment* environment, | |
| 258 Deoptimizer::BailoutType bailout_type); | |
| 259 void DeoptimizeIf(Condition condition, LEnvironment* environment); | |
| 260 void ApplyCheckIf(Condition condition, LBoundsCheck* check); | |
| 261 | |
| 262 void AddToTranslation(LEnvironment* environment, | |
| 263 Translation* translation, | |
| 264 LOperand* op, | |
| 265 bool is_tagged, | |
| 266 bool is_uint32, | |
| 267 int* object_index_pointer, | |
| 268 int* dematerialized_index_pointer); | |
| 269 void PopulateDeoptimizationData(Handle<Code> code); | |
| 270 int DefineDeoptimizationLiteral(Handle<Object> literal); | |
| 271 | |
| 272 void PopulateDeoptimizationLiteralsWithInlinedFunctions(); | |
| 273 | |
| 274 Register ToRegister(int index) const; | |
| 275 DwVfpRegister ToDoubleRegister(int index) const; | |
| 276 | |
| 277 MemOperand BuildSeqStringOperand(Register string, | |
| 278 LOperand* index, | |
| 279 String::Encoding encoding); | |
| 280 | |
| 281 void EmitIntegerMathAbs(LMathAbs* instr); | |
| 282 | 327 |
| 283 // Support for recording safepoint and position information. | 328 // Support for recording safepoint and position information. |
| 329 void RecordAndWritePosition(int position) V8_OVERRIDE; |
| 284 void RecordSafepoint(LPointerMap* pointers, | 330 void RecordSafepoint(LPointerMap* pointers, |
| 285 Safepoint::Kind kind, | 331 Safepoint::Kind kind, |
| 286 int arguments, | 332 int arguments, |
| 287 Safepoint::DeoptMode mode); | 333 Safepoint::DeoptMode mode); |
| 288 void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode); | 334 void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode); |
| 289 void RecordSafepoint(Safepoint::DeoptMode mode); | 335 void RecordSafepoint(Safepoint::DeoptMode mode); |
| 290 void RecordSafepointWithRegisters(LPointerMap* pointers, | 336 void RecordSafepointWithRegisters(LPointerMap* pointers, |
| 291 int arguments, | 337 int arguments, |
| 292 Safepoint::DeoptMode mode); | 338 Safepoint::DeoptMode mode); |
| 293 void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers, | 339 void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers, |
| 294 int arguments, | 340 int arguments, |
| 295 Safepoint::DeoptMode mode); | 341 Safepoint::DeoptMode mode); |
| 296 | 342 void RecordSafepointWithLazyDeopt(LInstruction* instr, |
| 297 void RecordAndWritePosition(int position) V8_OVERRIDE; | 343 SafepointMode safepoint_mode); |
| 298 | |
| 299 static Condition TokenToCondition(Token::Value op, bool is_unsigned); | |
| 300 void EmitGoto(int block); | |
| 301 | |
| 302 // EmitBranch expects to be the last instruction of a block. | |
| 303 template<class InstrType> | |
| 304 void EmitBranch(InstrType instr, Condition condition); | |
| 305 template<class InstrType> | |
| 306 void EmitFalseBranch(InstrType instr, Condition condition); | |
| 307 void EmitNumberUntagD(Register input, | |
| 308 DwVfpRegister result, | |
| 309 bool allow_undefined_as_nan, | |
| 310 bool deoptimize_on_minus_zero, | |
| 311 LEnvironment* env, | |
| 312 NumberUntagDMode mode); | |
| 313 | |
| 314 // Emits optimized code for typeof x == "y". Modifies input register. | |
| 315 // Returns the condition on which a final split to | |
| 316 // true and false label should be made, to optimize fallthrough. | |
| 317 Condition EmitTypeofIs(Label* true_label, | |
| 318 Label* false_label, | |
| 319 Register input, | |
| 320 Handle<String> type_name); | |
| 321 | |
| 322 // Emits optimized code for %_IsObject(x). Preserves input register. | |
| 323 // Returns the condition on which a final split to | |
| 324 // true and false label should be made, to optimize fallthrough. | |
| 325 Condition EmitIsObject(Register input, | |
| 326 Register temp1, | |
| 327 Label* is_not_object, | |
| 328 Label* is_object); | |
| 329 | |
| 330 // Emits optimized code for %_IsString(x). Preserves input register. | |
| 331 // Returns the condition on which a final split to | |
| 332 // true and false label should be made, to optimize fallthrough. | |
| 333 Condition EmitIsString(Register input, | |
| 334 Register temp1, | |
| 335 Label* is_not_string, | |
| 336 SmiCheck check_needed); | |
| 337 | |
| 338 // Emits optimized code for %_IsConstructCall(). | |
| 339 // Caller should branch on equal condition. | |
| 340 void EmitIsConstructCall(Register temp1, Register temp2); | |
| 341 | |
| 342 // Emits optimized code to deep-copy the contents of statically known | |
| 343 // object graphs (e.g. object literal boilerplate). | |
| 344 void EmitDeepCopy(Handle<JSObject> object, | |
| 345 Register result, | |
| 346 Register source, | |
| 347 int* offset, | |
| 348 AllocationSiteMode mode); | |
| 349 | |
| 350 // Emit optimized code for integer division. | |
| 351 // Inputs are signed. | |
| 352 // All registers are clobbered. | |
| 353 // If 'remainder' is no_reg, it is not computed. | |
| 354 void EmitSignedIntegerDivisionByConstant(Register result, | |
| 355 Register dividend, | |
| 356 int32_t divisor, | |
| 357 Register remainder, | |
| 358 Register scratch, | |
| 359 LEnvironment* environment); | |
| 360 | 344 |
| 361 void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE; | 345 void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE; |
| 362 void DoLoadKeyedExternalArray(LLoadKeyed* instr); | |
| 363 void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); | |
| 364 void DoLoadKeyedFixedArray(LLoadKeyed* instr); | |
| 365 void DoStoreKeyedExternalArray(LStoreKeyed* instr); | |
| 366 void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr); | |
| 367 void DoStoreKeyedFixedArray(LStoreKeyed* instr); | |
| 368 | 346 |
| 369 ZoneList<LEnvironment*> deoptimizations_; | 347 ZoneList<LEnvironment*> deoptimizations_; |
| 370 ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_; | 348 ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_; |
| 371 ZoneList<Handle<Object> > deoptimization_literals_; | 349 ZoneList<Handle<Object> > deoptimization_literals_; |
| 372 int inlined_function_count_; | 350 int inlined_function_count_; |
| 373 Scope* const scope_; | 351 Scope* const scope_; |
| 374 TranslationBuffer translations_; | 352 TranslationBuffer translations_; |
| 375 ZoneList<LDeferredCode*> deferred_; | 353 ZoneList<LDeferredCode*> deferred_; |
| 376 int osr_pc_offset_; | 354 int osr_pc_offset_; |
| 377 bool frame_is_built_; | 355 bool frame_is_built_; |
| 378 | 356 |
| 379 // Builder that keeps track of safepoints in the code. The table | 357 // Builder that keeps track of safepoints in the code. The table itself is |
| 380 // itself is emitted at the end of the generated code. | 358 // emitted at the end of the generated code. |
| 381 SafepointTableBuilder safepoints_; | 359 SafepointTableBuilder safepoints_; |
| 382 | 360 |
| 383 // Compiler from a set of parallel moves to a sequential list of moves. | 361 // Compiles a set of parallel moves into a sequential list of moves. |
| 384 LGapResolver resolver_; | 362 LGapResolver resolver_; |
| 385 | 363 |
| 386 Safepoint::Kind expected_safepoint_kind_; | 364 Safepoint::Kind expected_safepoint_kind_; |
| 387 | 365 |
| 388 class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED { | 366 int old_position_; |
| 367 |
| 368 class PushSafepointRegistersScope BASE_EMBEDDED { |
| 389 public: | 369 public: |
| 390 PushSafepointRegistersScope(LCodeGen* codegen, | 370 PushSafepointRegistersScope(LCodeGen* codegen, |
| 391 Safepoint::Kind kind) | 371 Safepoint::Kind kind) |
| 392 : codegen_(codegen) { | 372 : codegen_(codegen) { |
| 393 ASSERT(codegen_->info()->is_calling()); | 373 ASSERT(codegen_->info()->is_calling()); |
| 394 ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); | 374 ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); |
| 395 codegen_->expected_safepoint_kind_ = kind; | 375 codegen_->expected_safepoint_kind_ = kind; |
| 396 | 376 |
| 397 switch (codegen_->expected_safepoint_kind_) { | 377 switch (codegen_->expected_safepoint_kind_) { |
| 398 case Safepoint::kWithRegisters: | 378 case Safepoint::kWithRegisters: |
| 399 codegen_->masm_->PushSafepointRegisters(); | 379 codegen_->masm_->PushSafepointRegisters(); |
| 400 break; | 380 break; |
| 401 case Safepoint::kWithRegistersAndDoubles: | 381 case Safepoint::kWithRegistersAndDoubles: |
| 402 codegen_->masm_->PushSafepointRegistersAndDoubles(); | 382 codegen_->masm_->PushSafepointRegisters(); |
| 383 codegen_->masm_->PushSafepointFPRegisters(); |
| 403 break; | 384 break; |
| 404 default: | 385 default: |
| 405 UNREACHABLE(); | 386 UNREACHABLE(); |
| 406 } | 387 } |
| 407 } | 388 } |
| 408 | 389 |
| 409 ~PushSafepointRegistersScope() { | 390 ~PushSafepointRegistersScope() { |
| 410 Safepoint::Kind kind = codegen_->expected_safepoint_kind_; | 391 Safepoint::Kind kind = codegen_->expected_safepoint_kind_; |
| 411 ASSERT((kind & Safepoint::kWithRegisters) != 0); | 392 ASSERT((kind & Safepoint::kWithRegisters) != 0); |
| 412 switch (kind) { | 393 switch (kind) { |
| 413 case Safepoint::kWithRegisters: | 394 case Safepoint::kWithRegisters: |
| 414 codegen_->masm_->PopSafepointRegisters(); | 395 codegen_->masm_->PopSafepointRegisters(); |
| 415 break; | 396 break; |
| 416 case Safepoint::kWithRegistersAndDoubles: | 397 case Safepoint::kWithRegistersAndDoubles: |
| 417 codegen_->masm_->PopSafepointRegistersAndDoubles(); | 398 codegen_->masm_->PopSafepointFPRegisters(); |
| 399 codegen_->masm_->PopSafepointRegisters(); |
| 418 break; | 400 break; |
| 419 default: | 401 default: |
| 420 UNREACHABLE(); | 402 UNREACHABLE(); |
| 421 } | 403 } |
| 422 codegen_->expected_safepoint_kind_ = Safepoint::kSimple; | 404 codegen_->expected_safepoint_kind_ = Safepoint::kSimple; |
| 423 } | 405 } |
| 424 | 406 |
| 425 private: | 407 private: |
| 426 LCodeGen* codegen_; | 408 LCodeGen* codegen_; |
| 427 }; | 409 }; |
| 428 | 410 |
| 429 friend class LDeferredCode; | 411 friend class LDeferredCode; |
| 430 friend class LEnvironment; | |
| 431 friend class SafepointGenerator; | 412 friend class SafepointGenerator; |
| 432 DISALLOW_COPY_AND_ASSIGN(LCodeGen); | 413 DISALLOW_COPY_AND_ASSIGN(LCodeGen); |
| 433 }; | 414 }; |
| 434 | 415 |
| 435 | 416 |
| 436 class LDeferredCode : public ZoneObject { | 417 class LDeferredCode: public ZoneObject { |
| 437 public: | 418 public: |
| 438 explicit LDeferredCode(LCodeGen* codegen) | 419 explicit LDeferredCode(LCodeGen* codegen) |
| 439 : codegen_(codegen), | 420 : codegen_(codegen), |
| 440 external_exit_(NULL), | 421 external_exit_(NULL), |
| 441 instruction_index_(codegen->current_instruction_) { | 422 instruction_index_(codegen->current_instruction_) { |
| 442 codegen->AddDeferredCode(this); | 423 codegen->AddDeferredCode(this); |
| 443 } | 424 } |
| 444 | 425 |
| 445 virtual ~LDeferredCode() {} | 426 virtual ~LDeferredCode() { } |
| 446 virtual void Generate() = 0; | 427 virtual void Generate() = 0; |
| 447 virtual LInstruction* instr() = 0; | 428 virtual LInstruction* instr() = 0; |
| 448 | 429 |
| 449 void SetExit(Label* exit) { external_exit_ = exit; } | 430 void SetExit(Label* exit) { external_exit_ = exit; } |
| 450 Label* entry() { return &entry_; } | 431 Label* entry() { return &entry_; } |
| 451 Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } | 432 Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; } |
| 452 int instruction_index() const { return instruction_index_; } | 433 int instruction_index() const { return instruction_index_; } |
| 453 | 434 |
| 454 protected: | 435 protected: |
| 455 LCodeGen* codegen() const { return codegen_; } | 436 LCodeGen* codegen() const { return codegen_; } |
| 456 MacroAssembler* masm() const { return codegen_->masm(); } | 437 MacroAssembler* masm() const { return codegen_->masm(); } |
| 457 | 438 |
| 458 private: | 439 private: |
| 459 LCodeGen* codegen_; | 440 LCodeGen* codegen_; |
| 460 Label entry_; | 441 Label entry_; |
| 461 Label exit_; | 442 Label exit_; |
| 462 Label* external_exit_; | 443 Label* external_exit_; |
| 463 int instruction_index_; | 444 int instruction_index_; |
| 464 }; | 445 }; |
| 465 | 446 |
| 447 |
| 448 // This is the abstract class used by EmitBranchGeneric. |
| 449 // It is used to emit code for conditional branching. The Emit() function |
| 450 // emits code to branch when the condition holds, and EmitInverted() emits |
| 451 // the branch when the inverted condition holds. |
| 452 // |
| 453 // For concrete examples of conditions, see the implementations in |
| 454 // lithium-codegen-a64.cc (e.g. BranchOnCondition, CompareAndBranch). |
| 455 class BranchGenerator BASE_EMBEDDED { |
| 456 public: |
| 457 explicit BranchGenerator(LCodeGen* codegen) |
| 458 : codegen_(codegen) { } |
| 459 |
| 460 virtual ~BranchGenerator() { } |
| 461 |
| 462 virtual void Emit(Label* label) const = 0; |
| 463 virtual void EmitInverted(Label* label) const = 0; |
| 464 |
| 465 protected: |
| 466 MacroAssembler* masm() const { return codegen_->masm(); } |
| 467 |
| 468 LCodeGen* codegen_; |
| 469 }; |
| 470 |
| 466 } } // namespace v8::internal | 471 } } // namespace v8::internal |
| 467 | 472 |
| 468 #endif // V8_ARM_LITHIUM_CODEGEN_ARM_H_ | 473 #endif // V8_A64_LITHIUM_CODEGEN_A64_H_ |
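The comment above EmitBranchGeneric (new lines 159-162) and the BranchGenerator class at the bottom of the header (new lines 448-469) describe one design together: each kind of branch supplies Emit() for the plain condition and EmitInverted() for its negation, so the generic emitter can drop a branch entirely when its target is the next block to be emitted. The following is a minimal, self-contained C++ sketch of that fall-through trick, using toy stand-ins rather than V8's real MacroAssembler, Label, or block bookkeeping:

```cpp
#include <cstdio>

struct Label { int id; };  // toy stand-in for v8::internal::Label

class ToyMasm {  // toy stand-in for the macro assembler
 public:
  void B(Label* target, const char* cond) {   // conditional branch
    std::printf("  b.%s -> L%d\n", cond, target->id);
  }
  void B(Label* target) {                     // unconditional branch
    std::printf("  b    -> L%d\n", target->id);
  }
};

// Mirrors the shape of BranchGenerator: Emit() branches when the condition
// holds, EmitInverted() branches when it does not.
class BranchOnCondition {
 public:
  BranchOnCondition(ToyMasm* masm, const char* cond, const char* inverted)
      : masm_(masm), cond_(cond), inverted_(inverted) { }
  void Emit(Label* label) const { masm_->B(label, cond_); }
  void EmitInverted(Label* label) const { masm_->B(label, inverted_); }
 private:
  ToyMasm* masm_;
  const char* cond_;
  const char* inverted_;
};

// The fall-through trick described for EmitBranchGeneric: when the true block
// is emitted next, emit only the inverted branch to the false block and fall
// through; when the false block is next, emit only the plain branch; when
// neither is next, emit both.
void EmitBranchGeneric(ToyMasm* masm, const BranchOnCondition& branch,
                       Label* true_label, Label* false_label,
                       int next_block, int true_block, int false_block) {
  if (true_block == next_block) {
    branch.EmitInverted(false_label);
  } else if (false_block == next_block) {
    branch.Emit(true_label);
  } else {
    branch.Emit(true_label);
    masm->B(false_label);
  }
}

int main() {
  ToyMasm masm;
  Label t = { 1 }, f = { 2 };
  BranchOnCondition eq(&masm, "eq", "ne");
  EmitBranchGeneric(&masm, eq, &t, &f, 1, 1, 2);  // true block falls through
  EmitBranchGeneric(&masm, eq, &t, &f, 2, 1, 2);  // false block falls through
  EmitBranchGeneric(&masm, eq, &t, &f, 3, 1, 2);  // neither: branch twice
  return 0;
}
```

This is why the comment steers callers toward EmitBranch, EmitCompareAndBranch, and the other helpers: they can wrap the right BranchGenerator and funnel through the generic routine, so every branching instruction gets the fall-through optimization for free.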
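LDeferredCode (new lines 417-445) follows the same register-then-emit pattern: the constructor adds the object to the code generator's deferred list, and GenerateDeferredCode() later emits each Generate() body out of line between its entry() and exit() labels. A hypothetical subclass wiring up one of the DoDeferredXXX helpers declared above would look roughly like this; it assumes the declarations in this header and sketches the usual Lithium idiom rather than quoting code from this CL:

```cpp
// Hypothetical deferred-code subclass. In practice such classes are defined
// locally inside the corresponding LCodeGen member function (here, a
// DoNumberTagD), which is what grants them access to the now-private
// DoDeferredNumberTagD helper.
class DeferredNumberTagD : public LDeferredCode {
 public:
  DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
      : LDeferredCode(codegen), instr_(instr) { }  // registers itself
  virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
  virtual LInstruction* instr() { return instr_; }
 private:
  LNumberTagD* instr_;
};
```

The fast path would then branch to deferred->entry() for the slow case and bind deferred->exit() immediately afterwards, so the out-of-line body knows where to jump back to once it has run.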