Chromium Code Reviews

Unified Diff: src/a64/lithium-codegen-a64.h

Issue 148293020: Merge experimental/a64 to bleeding_edge. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Remove ARM from OWNERS (created 6 years, 10 months ago)
Index: src/a64/lithium-codegen-a64.h
diff --git a/src/arm/lithium-codegen-arm.h b/src/a64/lithium-codegen-a64.h
similarity index 62%
copy from src/arm/lithium-codegen-arm.h
copy to src/a64/lithium-codegen-a64.h
index 5251b85fa91e212e7832ab5b96821adedad09f6a..006165157f46740fd44428a3a4322eb64c56ad7b 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/a64/lithium-codegen-a64.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,12 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_ARM_LITHIUM_CODEGEN_ARM_H_
-#define V8_ARM_LITHIUM_CODEGEN_ARM_H_
+#ifndef V8_A64_LITHIUM_CODEGEN_A64_H_
+#define V8_A64_LITHIUM_CODEGEN_A64_H_
-#include "arm/lithium-arm.h"
+#include "a64/lithium-a64.h"
-#include "arm/lithium-gap-resolver-arm.h"
+#include "a64/lithium-gap-resolver-a64.h"
#include "deoptimizer.h"
#include "lithium-codegen.h"
#include "safepoint-table.h"
@@ -43,6 +43,7 @@ namespace internal {
// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
+class BranchGenerator;
class LCodeGen: public LCodeGenBase {
public:
@@ -63,6 +64,8 @@ class LCodeGen: public LCodeGenBase {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
+ // Simple accessors.
+ Scope* scope() const { return scope_; }
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
@@ -86,99 +89,170 @@ class LCodeGen: public LCodeGenBase {
return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
}
+ // Try to generate code for the entire chunk, but it may fail if the
+ // chunk contains constructs we cannot handle. Returns true if the
+ // code generation attempt succeeded.
+ bool GenerateCode();
+
+ // Finish the code by setting stack height, safepoint, and bailout
+ // information on it.
+ void FinishCode(Handle<Code> code);
+
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
+ Register ToRegister32(LOperand* op) const;
+ Operand ToOperand(LOperand* op);
+ Operand ToOperand32I(LOperand* op);
+ Operand ToOperand32U(LOperand* op);
+ MemOperand ToMemOperand(LOperand* op) const;
+ Handle<Object> ToHandle(LConstantOperand* op) const;
- // LOperand is loaded into scratch, unless already a register.
- Register EmitLoadRegister(LOperand* op, Register scratch);
-
- // LOperand must be a double register.
- DwVfpRegister ToDoubleRegister(LOperand* op) const;
+ // TODO(jbramley): Examine these helpers and check that they make sense.
+ // IsInteger32Constant returns true for smi constants, for example.
+ bool IsInteger32Constant(LConstantOperand* op) const;
+ bool IsSmi(LConstantOperand* op) const;
- // LOperand is loaded into dbl_scratch, unless already a double register.
- DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DwVfpRegister dbl_scratch);
- int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op);
- MemOperand ToMemOperand(LOperand* op) const;
- // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
- MemOperand ToHighMemOperand(LOperand* op) const;
+ DoubleRegister ToDoubleRegister(LOperand* op) const;
- bool IsInteger32(LConstantOperand* op) const;
- bool IsSmi(LConstantOperand* op) const;
- Handle<Object> ToHandle(LConstantOperand* op) const;
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
+ private:
+ // Return a double scratch register which can be used locally
+ // when generating code for a lithium instruction.
+ DoubleRegister double_scratch() { return crankshaft_fp_scratch; }
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
-
- enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness);
-
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
+ Label* exit,
+ Label* allocation_entry);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2);
+ void DoDeferredTaggedToI(LTaggedToI* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2);
void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
+ void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
- // Parallel move support.
- void DoParallelMove(LParallelMove* move);
+ Operand ToOperand32(LOperand* op, IntegerSignedness signedness);
+
+ static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+ void EmitGoto(int block);
void DoGap(LGap* instr);
- MemOperand PrepareKeyedOperand(Register key,
- Register base,
- bool key_is_constant,
- int constant_key,
- int element_size,
- int shift_size,
- int additional_index,
- int additional_offset);
+ // Generic version of EmitBranch. It contains logic to avoid emitting a
+ // branch to the next emitted basic block, where we can simply fall through.
+ // Don't use it directly; prefer one of the helpers such as
+ // LCodeGen::EmitBranch or LCodeGen::EmitCompareAndBranch.
+ template<class InstrType>
+ void EmitBranchGeneric(InstrType instr,
+ const BranchGenerator& branch);
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment, Translation* translation);
+ template<class InstrType>
+ void EmitBranch(InstrType instr, Condition condition);
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
+ template<class InstrType>
+ void EmitCompareAndBranch(InstrType instr,
+ Condition condition,
+ const Register& lhs,
+ const Operand& rhs);
- private:
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
+ template<class InstrType>
+ void EmitTestAndBranch(InstrType instr,
+ Condition condition,
+ const Register& value,
+ uint64_t mask);
- Scope* scope() const { return scope_; }
+ template<class InstrType>
+ void EmitBranchIfNonZeroNumber(InstrType instr,
+ const FPRegister& value,
+ const FPRegister& scratch);
+
+ template<class InstrType>
+ void EmitBranchIfHeapNumber(InstrType instr,
+ const Register& value);
+
+ template<class InstrType>
+ void EmitBranchIfRoot(InstrType instr,
+ const Register& value,
+ Heap::RootListIndex index);
+
+ // Emits optimized code to deep-copy the contents of statically known object
+ // graphs (e.g. object literal boilerplate). Expects a pointer to the
+ // allocated destination object in the result register, and a pointer to the
+ // source object in the source register.
+ void EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ Register scratch,
+ int* offset,
+ AllocationSiteMode mode);
+
+ // Emits optimized code for %_IsString(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
+ SmiCheck check_needed);
- Register scratch0() { return r9; }
- LowDwVfpRegister double_scratch0() { return kScratchDoubleReg; }
+ int DefineDeoptimizationLiteral(Handle<Object> literal);
+ void PopulateDeoptimizationData(Handle<Code> code);
+ void PopulateDeoptimizationLiteralsWithInlinedFunctions();
- LInstruction* GetNextInstruction();
+ MemOperand BuildSeqStringOperand(Register string,
+ Register temp,
+ LOperand* index,
+ String::Encoding encoding);
+ Deoptimizer::BailoutType DeoptimizeHeader(
+ LEnvironment* environment,
+ Deoptimizer::BailoutType* override_bailout_type);
+ void Deoptimize(LEnvironment* environment);
+ void Deoptimize(LEnvironment* environment,
+ Deoptimizer::BailoutType bailout_type);
+ void DeoptimizeIf(Condition cc, LEnvironment* environment);
+ void DeoptimizeIfZero(Register rt, LEnvironment* environment);
+ void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
+ void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
+ void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
+ void DeoptimizeIfRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment);
+ void DeoptimizeIfNotRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment);
+ void ApplyCheckIf(Condition cc, LBoundsCheck* check);
+
+ MemOperand PrepareKeyedExternalArrayOperand(Register key,
+ Register base,
+ Register scratch,
+ bool key_is_smi,
+ bool key_is_constant,
+ int constant_key,
+ ElementsKind elements_kind,
+ int additional_index);
+ void CalcKeyedArrayBaseRegister(Register base,
+ Register elements,
+ Register key,
+ bool key_is_tagged,
+ ElementsKind elements_kind);
- void EmitClassOfTest(Label* if_true,
- Label* if_false,
- Handle<String> class_name,
- Register input,
- Register temporary,
- Register temporary2);
+ void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
@@ -186,11 +260,21 @@ class LCodeGen: public LCodeGenBase {
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+ // Emit frame translation commands for an environment.
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+ void AddToTranslation(LEnvironment* environment,
+ Translation* translation,
+ LOperand* op,
+ bool is_tagged,
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
+
void SaveCallerDoubles();
void RestoreCallerDoubles();
- // Code generation passes. Returns true if code generation should
- // continue.
+ // Code generation steps. Returns true if code generation should continue.
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateDeoptJumpTable();
@@ -204,18 +288,14 @@ class LCodeGen: public LCodeGenBase {
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
};
- void CallCode(
- Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
+ void CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr);
- void CallCodeGeneric(
- Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode,
- TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
+ void CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode);
void CallRuntime(const Runtime::Function* function,
int num_arguments,
@@ -235,52 +315,18 @@ class LCodeGen: public LCodeGenBase {
LInstruction* instr,
LOperand* context);
- enum R1State {
- R1_UNINITIALIZED,
- R1_CONTAINS_TARGET
- };
-
- // Generate a direct call to a known function. Expects the function
- // to be in r1.
+ // Generate a direct call to a known function.
+ // If the function is already loaded into x1 by the caller, function_reg may
+ // be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will
+ // automatically load it.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count,
int arity,
LInstruction* instr,
- R1State r1_state);
-
- void RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition condition,
- LEnvironment* environment,
- Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition condition, LEnvironment* environment);
- void ApplyCheckIf(Condition condition, LBoundsCheck* check);
-
- void AddToTranslation(LEnvironment* environment,
- Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer);
- void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
-
- Register ToRegister(int index) const;
- DwVfpRegister ToDoubleRegister(int index) const;
-
- MemOperand BuildSeqStringOperand(Register string,
- LOperand* index,
- String::Encoding encoding);
-
- void EmitIntegerMathAbs(LMathAbs* instr);
+ Register function_reg = NoReg);
// Support for recording safepoint and position information.
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
@@ -293,78 +339,10 @@ class LCodeGen: public LCodeGenBase {
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
-
- void RecordAndWritePosition(int position) V8_OVERRIDE;
-
- static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block);
-
- // EmitBranch expects to be the last instruction of a block.
- template<class InstrType>
- void EmitBranch(InstrType instr, Condition condition);
- template<class InstrType>
- void EmitFalseBranch(InstrType instr, Condition condition);
- void EmitNumberUntagD(Register input,
- DwVfpRegister result,
- bool allow_undefined_as_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name);
-
- // Emits optimized code for %_IsObject(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsObject(Register input,
- Register temp1,
- Label* is_not_object,
- Label* is_object);
-
- // Emits optimized code for %_IsString(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsString(Register input,
- Register temp1,
- Label* is_not_string,
- SmiCheck check_needed);
-
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp1, Register temp2);
-
- // Emits optimized code to deep-copy the contents of statically known
- // object graphs (e.g. object literal boilerplate).
- void EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode);
-
- // Emit optimized code for integer division.
- // Inputs are signed.
- // All registers are clobbered.
- // If 'remainder' is no_reg, it is not computed.
- void EmitSignedIntegerDivisionByConstant(Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment);
+ void RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode);
void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
@@ -376,8 +354,8 @@ class LCodeGen: public LCodeGenBase {
int osr_pc_offset_;
bool frame_is_built_;
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
+ // Builder that keeps track of safepoints in the code. The table itself is
+ // emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
// Compiler from a set of parallel moves to a sequential list of moves.
@@ -385,7 +363,9 @@ class LCodeGen: public LCodeGenBase {
Safepoint::Kind expected_safepoint_kind_;
- class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
+ int old_position_;
+
+ class PushSafepointRegistersScope BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
@@ -399,7 +379,8 @@ class LCodeGen: public LCodeGenBase {
codegen_->masm_->PushSafepointRegisters();
break;
case Safepoint::kWithRegistersAndDoubles:
- codegen_->masm_->PushSafepointRegistersAndDoubles();
+ codegen_->masm_->PushSafepointRegisters();
+ codegen_->masm_->PushSafepointFPRegisters();
break;
default:
UNREACHABLE();
@@ -414,7 +395,8 @@ class LCodeGen: public LCodeGenBase {
codegen_->masm_->PopSafepointRegisters();
break;
case Safepoint::kWithRegistersAndDoubles:
- codegen_->masm_->PopSafepointRegistersAndDoubles();
+ codegen_->masm_->PopSafepointFPRegisters();
+ codegen_->masm_->PopSafepointRegisters();
break;
default:
UNREACHABLE();
@@ -427,13 +409,12 @@ class LCodeGen: public LCodeGenBase {
};
friend class LDeferredCode;
- friend class LEnvironment;
friend class SafepointGenerator;
DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};
-class LDeferredCode : public ZoneObject {
+class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
@@ -442,13 +423,13 @@ class LDeferredCode : public ZoneObject {
codegen->AddDeferredCode(this);
}
- virtual ~LDeferredCode() {}
+ virtual ~LDeferredCode() { }
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
- Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
protected:
@@ -463,6 +444,30 @@ class LDeferredCode : public ZoneObject {
int instruction_index_;
};
+
+// This is the abstract class used by EmitBranchGeneric.
+// It is used to emit code for conditional branching. The Emit() function
+// emits code to branch when the condition holds and EmitInverted() emits
+// the branch when the inverted condition is verified.
+//
+// For actual examples of condition see the concrete implementation in
+// lithium-codegen-a64.cc (e.g. BranchOnCondition, CompareAndBranch).
+class BranchGenerator BASE_EMBEDDED {
+ public:
+ explicit BranchGenerator(LCodeGen* codegen)
+ : codegen_(codegen) { }
+
+ virtual ~BranchGenerator() { }
+
+ virtual void Emit(Label* label) const = 0;
+ virtual void EmitInverted(Label* label) const = 0;
+
+ protected:
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ LCodeGen* codegen_;
+};
+
} } // namespace v8::internal
-#endif // V8_ARM_LITHIUM_CODEGEN_ARM_H_
+#endif // V8_A64_LITHIUM_CODEGEN_A64_H_
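
Note on the new BranchGenerator abstraction declared above: this header only exposes the abstract interface plus the templated EmitBranchGeneric/EmitBranch helpers, and the concrete generators live in lithium-codegen-a64.cc (the class comment names BranchOnCondition and CompareAndBranch). The sketch below is not part of this patch; it only illustrates how a concrete generator and the EmitBranch helper could plug together. The exact MacroAssembler calls (B, InvertCondition) and the delegation inside EmitBranch are assumptions inferred from the declarations in this header.

// Illustrative sketch only -- not taken from this patch.
// A concrete generator wraps whatever state is needed to emit one kind of
// conditional branch, in both the normal and the inverted form.
class BranchOnCondition : public BranchGenerator {
 public:
  BranchOnCondition(LCodeGen* codegen, Condition cond)
      : BranchGenerator(codegen), cond_(cond) { }

  virtual void Emit(Label* label) const {
    // Branch to 'label' when the condition holds.
    masm()->B(cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    // Branch to 'label' when the condition does not hold, so that the
    // true successor can fall through.
    masm()->B(InvertCondition(cond_), label);
  }

 private:
  Condition cond_;
};

// Assumed shape of the condition-based helper: build a generator and let
// EmitBranchGeneric decide whether to emit the branch, the inverted branch,
// or nothing at all when the target is the next emitted block.
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
  BranchOnCondition branch(this, condition);
  EmitBranchGeneric(instr, branch);
}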