Chromium Code Reviews

| Index: src/arm/lithium-codegen-arm.cc |
| diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc |
| index b773ca9ea1c16d245dcebb3c9e29518fe2e91f2f..ecea10190f19104635cb90c69d3397fd52854cb7 100644 |
| --- a/src/arm/lithium-codegen-arm.cc |
| +++ b/src/arm/lithium-codegen-arm.cc |
| @@ -65,8 +65,6 @@ bool LCodeGen::GenerateCode() { |
| HPhase phase("Z_Code generation", chunk()); |
| ASSERT(is_unused()); |
| status_ = GENERATING; |
| - CpuFeatures::Scope scope1(VFP3); |
| - CpuFeatures::Scope scope2(ARMv7); |
| CodeStub::GenerateFPStubs(); |
| @@ -118,44 +116,49 @@ void LCodeGen::Comment(const char* format, ...) { |
| bool LCodeGen::GeneratePrologue() { |
| ASSERT(is_generating()); |
| - ProfileEntryHookStub::MaybeCallEntryHook(masm_); |
| + if (info()->IsOptimizing()) { |
| + ProfileEntryHookStub::MaybeCallEntryHook(masm_); |
| #ifdef DEBUG |
| - if (strlen(FLAG_stop_at) > 0 && |
| - info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { |
| - __ stop("stop_at"); |
| - } |
| + if (strlen(FLAG_stop_at) > 0 && |
| + info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { |
| + __ stop("stop_at"); |
| + } |
| #endif |
| - // r1: Callee's JS function. |
| - // cp: Callee's context. |
| - // fp: Caller's frame pointer. |
| - // lr: Caller's pc. |
| - |
| - // Strict mode functions and builtins need to replace the receiver |
| - // with undefined when called as functions (without an explicit |
| - // receiver object). r5 is zero for method calls and non-zero for |
| - // function calls. |
| - if (!info_->is_classic_mode() || info_->is_native()) { |
| - Label ok; |
| - Label begin; |
| - __ bind(&begin); |
| - __ cmp(r5, Operand(0)); |
| - __ b(eq, &ok); |
| - int receiver_offset = scope()->num_parameters() * kPointerSize; |
| - __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); |
| - __ str(r2, MemOperand(sp, receiver_offset)); |
| - __ bind(&ok); |
| - ASSERT_EQ(kSizeOfOptimizedStrictModePrologue, ok.pos() - begin.pos()); |
| - } |
| - |
| - // The following three instructions must remain together and unmodified for |
| - // code aging to work properly. |
| - __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); |
| - // Add unused load of ip to ensure prologue sequence is identical for |
| - // full-codegen and lithium-codegen. |
| - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
| - __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP. |
| + // r1: Callee's JS function. |
| + // cp: Callee's context. |
| + // fp: Caller's frame pointer. |
| + // lr: Caller's pc. |
| + |
| + // Strict mode functions and builtins need to replace the receiver |
| + // with undefined when called as functions (without an explicit |
| + // receiver object). r5 is zero for method calls and non-zero for |
| + // function calls. |
| + if (!info_->is_classic_mode() || info_->is_native()) { |
| + Label ok; |
| + Label begin; |
| + __ bind(&begin); |
| + __ cmp(r5, Operand(0)); |
| + __ b(eq, &ok); |
| + int receiver_offset = scope()->num_parameters() * kPointerSize; |
| + __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); |
| + __ str(r2, MemOperand(sp, receiver_offset)); |
| + __ bind(&ok); |
| + ASSERT_EQ(kSizeOfOptimizedStrictModePrologue, ok.pos() - begin.pos()); |
| + } |
| + } |
| + |
| + if (NeedsEagerFrame()) { |
| + // The following three instructions must remain together and unmodified for |
| + // code aging to work properly. |
| + __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); |
| + // Add unused load of ip to ensure prologue sequence is identical for |
| + // full-codegen and lithium-codegen. |
| + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
| + __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP. |
| + frame_is_built_ = true; |
| + } |
| // Reserve space for the stack slots needed by the code. |
| int slots = GetStackSlotCount(); |
| @@ -174,7 +177,7 @@ bool LCodeGen::GeneratePrologue() { |
| } |
| // Possibly allocate a local context. |
| - int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; |
| + int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; |
| if (heap_slots > 0) { |
| Comment(";;; Allocate local context"); |
| // Argument to NewContext is the function, which is in r1. |
| @@ -210,7 +213,7 @@ bool LCodeGen::GeneratePrologue() { |
| } |
| // Trace the call. |
| - if (FLAG_trace) { |
| + if (FLAG_trace && info()->IsOptimizing()) { |
| __ CallRuntime(Runtime::kTraceEnter, 0); |
| } |
| return !is_aborted(); |
| @@ -245,10 +248,31 @@ bool LCodeGen::GenerateDeferredCode() { |
| for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { |
| LDeferredCode* code = deferred_[i]; |
| __ bind(code->entry()); |
| + if (NeedsDeferredFrame()) { |
| + Comment(";;; Deferred build frame", |
| + code->instruction_index(), |
| + code->instr()->Mnemonic()); |
| + ASSERT(!frame_is_built_); |
| + ASSERT(info()->IsStub()); |
| + frame_is_built_ = true; |
| + __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); |
| + __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); |
| + __ push(scratch0()); |
| + __ add(fp, sp, Operand(2 * kPointerSize)); |
| + } |
| Comment(";;; Deferred code @%d: %s.", |
| code->instruction_index(), |
| code->instr()->Mnemonic()); |
| code->Generate(); |
| + if (NeedsDeferredFrame()) { |
| + Comment(";;; Deferred destory frame", |
| + code->instruction_index(), |
| + code->instr()->Mnemonic()); |
| + ASSERT(frame_is_built_); |
| + __ pop(ip); |
| + __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit()); |
| + frame_is_built_ = false; |
| + } |
| __ jmp(code->exit()); |
| } |
| } |
| @@ -270,24 +294,72 @@ bool LCodeGen::GenerateDeoptJumpTable() { |
| // Each entry in the jump table generates one instruction and inlines one |
| // 32bit data after it. |
| if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) + |
| - deopt_jump_table_.length() * 2)) { |
| + deopt_jump_table_.length() * 5)) { |
| Abort("Generated code is too large"); |
| } |
| - // Block the constant pool emission during the jump table emission. |
| - __ BlockConstPoolFor(deopt_jump_table_.length()); |
| + masm()->CheckConstPool(true, false); |
| + |
| __ RecordComment("[ Deoptimisation jump table"); |
| Label table_start; |
| __ bind(&table_start); |
| + Label needs_frame_not_call; |
| + bool has_generated_needs_frame_not_call = false; |
| + Label needs_frame_is_call; |
| + bool has_generated_needs_frame_is_call = false; |
| for (int i = 0; i < deopt_jump_table_.length(); i++) { |
| __ bind(&deopt_jump_table_[i].label); |
| - __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta)); |
| - __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address)); |
| + Address entry = deopt_jump_table_[i].address; |
| + if (deopt_jump_table_[i].needs_frame) { |
| + __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry))); |
| + if (deopt_jump_table_[i].is_call) { |
| + if (!has_generated_needs_frame_is_call) { |
| + has_generated_needs_frame_is_call = true; |
| + __ bind(&needs_frame_is_call); |
| + __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); |
| + // If there is no frame, we don't have access to the JSFunction that |
| + // needs to be put into the frame. |
| + ASSERT(info()->IsStub()); |
| + __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); |
| + __ push(scratch0()); |
| + __ add(fp, sp, Operand(2 * kPointerSize)); |
| + __ mov(lr, Operand(pc), LeaveCC, al); |
| + __ mov(pc, ip); |
| + } else { |
| + __ b(&needs_frame_is_call); |
| + } |
| + } else { |
| + if (!has_generated_needs_frame_not_call) { |
| + has_generated_needs_frame_not_call = true; |
| + __ bind(&needs_frame_not_call); |
| + __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); |
| + // If there is no frame, we don't have access to the JSFunction that |
| + // needs to be put into the frame. |
| + ASSERT(info()->IsStub()); |
| + __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); |
| + __ push(scratch0()); |
| + __ add(fp, sp, Operand(2 * kPointerSize)); |
| + __ mov(pc, ip); |
| + } else { |
| + __ b(&needs_frame_not_call); |
| + } |
| + } |
| + } else { |
| + if (deopt_jump_table_[i].is_call) { |
| + __ mov(lr, Operand(pc), LeaveCC, al); |
| + __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry))); |
| + } else { |
| + __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry))); |
| + } |
| + } |
| + masm()->CheckConstPool(false, false); |
| } |
| - ASSERT(masm()->InstructionsGeneratedSince(&table_start) == |
| - deopt_jump_table_.length() * 2); |
| __ RecordComment("]"); |
| + // Force constant pool emission at the end of the deopt jump table to make |
| + // sure that no constant pools are emitted after. |
| + masm()->CheckConstPool(true, false); |
| + |
| // The deoptimization jump table is the last part of the instruction |
| // sequence. Mark the generated code as done unless we bailed out. |
| if (!is_aborted()) status_ = DONE; |
| @@ -307,8 +379,8 @@ Register LCodeGen::ToRegister(int index) const { |
| } |
| -DoubleRegister LCodeGen::ToDoubleRegister(int index) const { |
| - return DoubleRegister::FromAllocationIndex(index); |
| +DwVfpRegister LCodeGen::ToDoubleRegister(int index) const { |
| + return DwVfpRegister::FromAllocationIndex(index); |
| } |
| @@ -349,15 +421,15 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { |
| } |
| -DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { |
| +DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const { |
| ASSERT(op->IsDoubleRegister()); |
| return ToDoubleRegister(op->index()); |
| } |
| -DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, |
| - SwVfpRegister flt_scratch, |
| - DoubleRegister dbl_scratch) { |
| +DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, |
| + SwVfpRegister flt_scratch, |
| + DwVfpRegister dbl_scratch) { |
| if (op->IsDoubleRegister()) { |
| return ToDoubleRegister(op->index()); |
| } else if (op->IsConstantOperand()) { |
| @@ -493,7 +565,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, |
| translation, |
| arguments_index, |
| arguments_count); |
| - int closure_id = *info()->closure() != *environment->closure() |
| + bool has_closure_id = !info()->closure().is_null() && |
| + *info()->closure() != *environment->closure(); |
| + int closure_id = has_closure_id |
| ? DefineDeoptimizationLiteral(environment->closure()) |
| : Translation::kSelfLiteralId; |
| @@ -514,6 +588,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, |
| ASSERT(height == 0); |
| translation->BeginSetterStubFrame(closure_id); |
| break; |
| + case STUB: |
| + translation->BeginCompiledStubPseudoFrame(Code::KEYED_LOAD_IC); |

Jakob Kummerow (2012/11/19 12:36:00): can we get the code type dynamically?
danno (2012/11/26 17:16:18): Done.
| + break; |
| case ARGUMENTS_ADAPTOR: |
| translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); |
| break; |
| @@ -709,7 +786,11 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { |
| RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| ASSERT(environment->HasBeenRegistered()); |
| int id = environment->deoptimization_index(); |
| - Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); |
| + |
| + Deoptimizer::BailoutType bailout_type = frame_is_built_ |
| + ? Deoptimizer::EAGER |
| + : Deoptimizer::LAZY; |
| + Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type); |
| if (entry == NULL) { |
| Abort("bailout was not prepared"); |
| return; |
| @@ -725,14 +806,19 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { |
| if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc); |
| - if (cc == al) { |
| - __ Jump(entry, RelocInfo::RUNTIME_ENTRY); |
| + ASSERT(info()->IsStub() || frame_is_built_); |
| + if (cc == al && frame_is_built_) { |
| + __ Jump(entry, RelocInfo::RUNTIME_ENTRY); |

Jakob Kummerow (2012/11/19 12:36:00): nit: indentation
danno (2012/11/26 17:16:18): Done.
| } else { |
| // We often have several deopts to the same entry, reuse the last |
| // jump entry if this is the case. |
| + bool is_call = !frame_is_built_; |
| if (deopt_jump_table_.is_empty() || |
| - (deopt_jump_table_.last().address != entry)) { |
| - deopt_jump_table_.Add(JumpTableEntry(entry), zone()); |
| + (deopt_jump_table_.last().address != entry) || |
| + (deopt_jump_table_.last().is_call != is_call) || |
| + (deopt_jump_table_.last().needs_frame != !frame_is_built_)) { |
| + deopt_jump_table_.Add( |
| + JumpTableEntry(entry, !frame_is_built_, is_call), zone()); |
| } |
| __ b(cc, &deopt_jump_table_.last().label); |
| } |
| @@ -1773,9 +1859,9 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
| __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); |
| } else { |
| ASSERT(instr->hydrogen()->representation().IsDouble()); |
| - DoubleRegister left_reg = ToDoubleRegister(left); |
| - DoubleRegister right_reg = ToDoubleRegister(right); |
| - DoubleRegister result_reg = ToDoubleRegister(instr->result()); |
| + DwVfpRegister left_reg = ToDoubleRegister(left); |
| + DwVfpRegister right_reg = ToDoubleRegister(right); |
| + DwVfpRegister result_reg = ToDoubleRegister(instr->result()); |
| Label check_nan_left, check_zero, return_left, return_right, done; |
| __ VFPCompareAndSetFlags(left_reg, right_reg); |
| __ b(vs, &check_nan_left); |
| @@ -1818,9 +1904,9 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
| void LCodeGen::DoArithmeticD(LArithmeticD* instr) { |
| - DoubleRegister left = ToDoubleRegister(instr->left()); |
| - DoubleRegister right = ToDoubleRegister(instr->right()); |
| - DoubleRegister result = ToDoubleRegister(instr->result()); |
| + DwVfpRegister left = ToDoubleRegister(instr->left()); |
| + DwVfpRegister right = ToDoubleRegister(instr->right()); |
| + DwVfpRegister result = ToDoubleRegister(instr->result()); |
| switch (instr->op()) { |
| case Token::ADD: |
| __ vadd(result, left, right); |
| @@ -1908,7 +1994,7 @@ void LCodeGen::DoBranch(LBranch* instr) { |
| __ cmp(reg, Operand(0)); |
| EmitBranch(true_block, false_block, ne); |
| } else if (r.IsDouble()) { |
| - DoubleRegister reg = ToDoubleRegister(instr->value()); |
| + DwVfpRegister reg = ToDoubleRegister(instr->value()); |
| Register scratch = scratch0(); |
| // Test the double value. Zero and NaN are false. |
| @@ -1994,7 +2080,7 @@ void LCodeGen::DoBranch(LBranch* instr) { |
| if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { |
| // heap number -> false iff +0, -0, or NaN. |
| - DoubleRegister dbl_scratch = double_scratch0(); |
| + DwVfpRegister dbl_scratch = double_scratch0(); |
| Label not_heap_number; |
| __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); |
| __ b(ne, ¬_heap_number); |
| @@ -2610,16 +2696,21 @@ void LCodeGen::DoCmpT(LCmpT* instr) { |
| void LCodeGen::DoReturn(LReturn* instr) { |
| - if (FLAG_trace) { |
| + if (FLAG_trace && info()->IsOptimizing()) { |
| // Push the return value on the stack as the parameter. |
| // Runtime::TraceExit returns its parameter in r0. |
| __ push(r0); |
| __ CallRuntime(Runtime::kTraceExit, 1); |
| } |
| - int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize; |
| - __ mov(sp, fp); |
| - __ ldm(ia_w, sp, fp.bit() | lr.bit()); |
| - __ add(sp, sp, Operand(sp_delta)); |
| + if (NeedsEagerFrame()) { |
| + int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize; |
| + __ mov(sp, fp); |
| + __ ldm(ia_w, sp, fp.bit() | lr.bit()); |
| + __ add(sp, sp, Operand(sp_delta)); |
| + } |
| + if (info()->IsStub()) { |
| + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| + } |
| __ Jump(lr); |
| } |
| @@ -2969,17 +3060,24 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { |
| if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || |
| elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
| - CpuFeatures::Scope scope(VFP3); |
| DwVfpRegister result = ToDoubleRegister(instr->result()); |
| Operand operand = key_is_constant |
| ? Operand(constant_key << element_size_shift) |
| : Operand(key, LSL, shift_size); |
| __ add(scratch0(), external_pointer, operand); |
| - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
| - __ vldr(result.low(), scratch0(), additional_offset); |
| - __ vcvt_f64_f32(result, result.low()); |
| - } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS |
| - __ vldr(result, scratch0(), additional_offset); |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| + CpuFeatures::Scope scope(VFP2); |
| + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
| + __ vldr(result.low(), scratch0(), additional_offset); |
| + __ vcvt_f64_f32(result, result.low()); |
| + } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS |
| + __ vldr(result, scratch0(), additional_offset); |
| + } |
| + } else { |
| + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
| + UNIMPLEMENTED(); |
| + } else { |
| + } |
| } |
| } else { |
| Register result = ToRegister(instr->result()); |
| @@ -3048,23 +3146,28 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { |
| key = ToRegister(instr->key()); |
| } |
| - Operand operand = key_is_constant |
| - ? Operand(((constant_key + instr->additional_index()) << |
| - element_size_shift) + |
| - FixedDoubleArray::kHeaderSize - kHeapObjectTag) |
| - : Operand(key, LSL, shift_size); |
| - __ add(elements, elements, operand); |
| + int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) + |
| + ((constant_key + instr->additional_index()) << element_size_shift); |
| if (!key_is_constant) { |
| - __ add(elements, elements, |
| - Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) + |
| - (instr->additional_index() << element_size_shift))); |
| - } |
| - |
| - __ vldr(result, elements, 0); |
| - if (instr->hydrogen()->RequiresHoleCheck()) { |
| - __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); |
| - __ cmp(scratch, Operand(kHoleNanUpper32)); |
| - DeoptimizeIf(eq, instr->environment()); |
| + __ add(elements, elements, Operand(key, LSL, shift_size)); |
| + } |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| + CpuFeatures::Scope scope(VFP2); |
| + __ add(elements, elements, Operand(base_offset)); |
| + __ vldr(result, elements, 0); |
| + if (instr->hydrogen()->RequiresHoleCheck()) { |
| + __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); |
| + __ cmp(scratch, Operand(kHoleNanUpper32)); |
| + DeoptimizeIf(eq, instr->environment()); |
| + } |
| + } else { |
| + __ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize)); |
| + __ ldr(sfpd_lo, MemOperand(elements, base_offset)); |
| + if (instr->hydrogen()->RequiresHoleCheck()) { |
| + ASSERT(kPointerSize == sizeof(kHoleNanLower32)); |
| + __ cmp(sfpd_hi, Operand(kHoleNanUpper32)); |
| + DeoptimizeIf(eq, instr->environment()); |
| + } |
| } |
| } |
| @@ -3536,7 +3639,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { |
| void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { |
| - DoubleRegister input = ToDoubleRegister(instr->value()); |
| + DwVfpRegister input = ToDoubleRegister(instr->value()); |
| Register result = ToRegister(instr->result()); |
| Register scratch = scratch0(); |
| @@ -3561,7 +3664,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { |
| void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { |
| - DoubleRegister input = ToDoubleRegister(instr->value()); |
| + DwVfpRegister input = ToDoubleRegister(instr->value()); |
| Register result = ToRegister(instr->result()); |
| DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); |
| Register scratch = scratch0(); |
| @@ -3626,16 +3729,16 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { |
| void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { |
| - DoubleRegister input = ToDoubleRegister(instr->value()); |
| - DoubleRegister result = ToDoubleRegister(instr->result()); |
| + DwVfpRegister input = ToDoubleRegister(instr->value()); |
| + DwVfpRegister result = ToDoubleRegister(instr->result()); |
| __ vsqrt(result, input); |
| } |
| void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { |
| - DoubleRegister input = ToDoubleRegister(instr->value()); |
| - DoubleRegister result = ToDoubleRegister(instr->result()); |
| - DoubleRegister temp = ToDoubleRegister(instr->temp()); |
| + DwVfpRegister input = ToDoubleRegister(instr->value()); |
| + DwVfpRegister result = ToDoubleRegister(instr->result()); |
| + DwVfpRegister temp = ToDoubleRegister(instr->temp()); |
| // Note that according to ECMA-262 15.8.2.13: |
| // Math.pow(-Infinity, 0.5) == Infinity |
| @@ -4469,7 +4572,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, |
| Label slow; |
| Register src = ToRegister(value); |
| Register dst = ToRegister(instr->result()); |
| - DoubleRegister dbl_scratch = double_scratch0(); |
| + DwVfpRegister dbl_scratch = double_scratch0(); |
| SwVfpRegister flt_scratch = dbl_scratch.low(); |
| // Preserve the value of all registers. |
| @@ -4484,8 +4587,22 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, |
| __ SmiUntag(src, dst); |
| __ eor(src, src, Operand(0x80000000)); |
| } |
| - __ vmov(flt_scratch, src); |
| - __ vcvt_f64_s32(dbl_scratch, flt_scratch); |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| + CpuFeatures::Scope scope(VFP2); |
| + __ vmov(flt_scratch, src); |
| + __ vcvt_f64_s32(dbl_scratch, flt_scratch); |
| + } else { |
| + FloatingPointHelper::Destination dest = |
| + FloatingPointHelper::kCoreRegisters; |
| + FloatingPointHelper::ConvertIntToDouble(masm(), |

Jakob Kummerow (2012/11/19 12:36:00): nit: fix format
danno (2012/11/26 17:16:18): Done.
| + src, |
| + dest, |
| + d0, |
| + sfpd_lo, |
| + sfpd_hi, |
| + r9, |
| + s0); |
| + } |
| } else { |
| __ vmov(flt_scratch, src); |
| __ vcvt_f64_u32(dbl_scratch, flt_scratch); |
| @@ -4513,7 +4630,13 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, |
| // Done. Put the value in dbl_scratch into the value of the allocated heap |
| // number. |
| __ bind(&done); |
| - __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| + CpuFeatures::Scope scope(VFP2); |
| + __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); |
| + } else { |
| + __ str(sfpd_lo, MemOperand(dst, HeapNumber::kValueOffset)); |
| + __ str(sfpd_hi, MemOperand(dst, HeapNumber::kValueOffset + kPointerSize)); |
| + } |
| __ add(dst, dst, Operand(kHeapObjectTag)); |
| __ StoreToSafepointRegisterSlot(dst, dst); |
| } |
| @@ -4530,7 +4653,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { |
| LNumberTagD* instr_; |
| }; |
| - DoubleRegister input_reg = ToDoubleRegister(instr->value()); |
| + DwVfpRegister input_reg = ToDoubleRegister(instr->value()); |
| Register scratch = scratch0(); |
| Register reg = ToRegister(instr->result()); |
| Register temp1 = ToRegister(instr->temp()); |
| @@ -4546,7 +4669,13 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { |
| __ jmp(deferred->entry()); |
| } |
| __ bind(deferred->exit()); |
| - __ vstr(input_reg, reg, HeapNumber::kValueOffset); |
| + if (CpuFeatures::IsSupported(VFP2)) { |
| + CpuFeatures::Scope scope(VFP2); |
| + __ vstr(input_reg, reg, HeapNumber::kValueOffset); |
| + } else { |
| + __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset)); |
| + __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize)); |
| + } |
| // Now that we have finished with the object's real address tag it |
| __ add(reg, reg, Operand(kHeapObjectTag)); |
| } |
| @@ -4587,7 +4716,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
| void LCodeGen::EmitNumberUntagD(Register input_reg, |
| - DoubleRegister result_reg, |
| + DwVfpRegister result_reg, |
| bool deoptimize_on_undefined, |
| bool deoptimize_on_minus_zero, |
| LEnvironment* env) { |
| @@ -4759,7 +4888,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { |
| ASSERT(result->IsDoubleRegister()); |
| Register input_reg = ToRegister(input); |
| - DoubleRegister result_reg = ToDoubleRegister(result); |
| + DwVfpRegister result_reg = ToDoubleRegister(result); |
| EmitNumberUntagD(input_reg, result_reg, |
| instr->hydrogen()->deoptimize_on_undefined(), |
| @@ -4880,10 +5009,10 @@ void LCodeGen::DoCheckMapCommon(Register reg, |
| Register scratch, |
| Handle<Map> map, |
| CompareMapMode mode, |
| - LEnvironment* env) { |
| + LInstruction* instr) { |
| Label success; |
| __ CompareMap(reg, scratch, map, &success, mode); |
| - DeoptimizeIf(ne, env); |
| + DeoptimizeIf(ne, instr->environment()); |
| __ bind(&success); |
| } |
| @@ -4902,15 +5031,15 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
| __ b(eq, &success); |
| } |
| Handle<Map> map = map_set->last(); |
| - DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment()); |
| + DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr); |
| __ bind(&success); |
| } |
| void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
| - DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); |
| + DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); |
| Register result_reg = ToRegister(instr->result()); |
| - DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); |
| + DwVfpRegister temp_reg = ToDoubleRegister(instr->temp()); |
| __ ClampDoubleToUint8(result_reg, value_reg, temp_reg); |
| } |
| @@ -4926,7 +5055,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { |
| Register scratch = scratch0(); |
| Register input_reg = ToRegister(instr->unclamped()); |
| Register result_reg = ToRegister(instr->result()); |
| - DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); |
| + DwVfpRegister temp_reg = ToDoubleRegister(instr->temp()); |
| Label is_smi, done, heap_number; |
| // Both smi and heap number cases are handled. |
| @@ -4974,7 +5103,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { |
| while (!current_prototype.is_identical_to(holder)) { |
| DoCheckMapCommon(temp1, temp2, |
| Handle<Map>(current_prototype->map()), |
| - ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); |
| + ALLOW_ELEMENT_TRANSITION_MAPS, instr); |
| current_prototype = |
| Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype())); |
| // Load next prototype object. |
| @@ -4984,7 +5113,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { |
| // Check the holder map. |
| DoCheckMapCommon(temp1, temp2, |
| Handle<Map>(current_prototype->map()), |
| - ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); |
| + ALLOW_ELEMENT_TRANSITION_MAPS, instr); |
| } |
| @@ -5503,6 +5632,7 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) { |
| void LCodeGen::EnsureSpaceForLazyDeopt() { |
| + if (info()->IsStub()) return; |
| // Ensure that we have enough space after the previous lazy-bailout |
| // instruction for patching the code here. |
| int current_pc = masm()->pc_offset(); |