Index: src/ia32/lithium-codegen-ia32.cc
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 38032dacd929f3fea558cd48b247a9f9da211e1e..d47406232fab5c269922777b1b8493090251bd19 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -17,14 +17,6 @@
namespace v8 {
namespace internal {
-
-static SaveFPRegsMode GetSaveFPRegsMode(Isolate* isolate) {
- // We don't need to save floating point regs when generating the snapshot
- return CpuFeatures::IsSafeForSnapshot(isolate, SSE2) ? kSaveFPRegs
- : kDontSaveFPRegs;
-}
-
-
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator V8_FINAL : public CallWrapper {
@@ -103,7 +95,6 @@ void LCodeGen::SaveCallerDoubles() {
ASSERT(info()->saves_caller_doubles());
ASSERT(NeedsEagerFrame());
Comment(";;; Save clobbered callee double registers");
- CpuFeatureScope scope(masm(), SSE2);
int count = 0;
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
@@ -120,7 +111,6 @@ void LCodeGen::RestoreCallerDoubles() {
ASSERT(info()->saves_caller_doubles());
ASSERT(NeedsEagerFrame());
Comment(";;; Restore clobbered callee double registers");
- CpuFeatureScope scope(masm(), SSE2);
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
int count = 0;
@@ -253,9 +243,7 @@ bool LCodeGen::GeneratePrologue() {
}
}
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
- SaveCallerDoubles();
- }
+ if (info()->saves_caller_doubles()) SaveCallerDoubles();
}
// Possibly allocate a local context.
@@ -367,27 +355,10 @@ void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
if (!instr->IsLazyBailout() && !instr->IsGap()) {
safepoints_.BumpLastLazySafepointIndex();
}
- if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
}
-void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
- if (!CpuFeatures::IsSupported(SSE2)) {
- if (instr->IsGoto()) {
- x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
- } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
- !instr->IsGap() && !instr->IsReturn()) {
- if (instr->ClobbersDoubleRegisters(isolate())) {
- if (instr->HasDoubleRegisterResult()) {
- ASSERT_EQ(1, x87_stack_.depth());
- } else {
- ASSERT_EQ(0, x87_stack_.depth());
- }
- }
- __ VerifyX87StackDepth(x87_stack_.depth());
- }
- }
-}
+void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { }
bool LCodeGen::GenerateJumpTable() {
@@ -433,9 +404,7 @@ bool LCodeGen::GenerateJumpTable() {
__ ret(0); // Call the continuation without clobbering registers.
}
} else {
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
- RestoreCallerDoubles();
- }
+ if (info()->saves_caller_doubles()) RestoreCallerDoubles();
__ call(entry, RelocInfo::RUNTIME_ENTRY);
}
}
@@ -448,8 +417,6 @@ bool LCodeGen::GenerateDeferredCode() {
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
- X87Stack copy(code->x87_stack());
- x87_stack_ = copy;
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
@@ -515,232 +482,17 @@ Register LCodeGen::ToRegister(int index) const {
}
-X87Register LCodeGen::ToX87Register(int index) const {
- return X87Register::FromAllocationIndex(index);
-}
-
-
XMMRegister LCodeGen::ToDoubleRegister(int index) const {
return XMMRegister::FromAllocationIndex(index);
}
-void LCodeGen::X87LoadForUsage(X87Register reg) {
- ASSERT(x87_stack_.Contains(reg));
- x87_stack_.Fxch(reg);
- x87_stack_.pop();
-}
-
-
-void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
- ASSERT(x87_stack_.Contains(reg1));
- ASSERT(x87_stack_.Contains(reg2));
- x87_stack_.Fxch(reg1, 1);
- x87_stack_.Fxch(reg2);
- x87_stack_.pop();
- x87_stack_.pop();
-}
-
-
-void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
- ASSERT(is_mutable_);
- ASSERT(Contains(reg) && stack_depth_ > other_slot);
- int i = ArrayIndex(reg);
- int st = st2idx(i);
- if (st != other_slot) {
- int other_i = st2idx(other_slot);
- X87Register other = stack_[other_i];
- stack_[other_i] = reg;
- stack_[i] = other;
- if (st == 0) {
- __ fxch(other_slot);
- } else if (other_slot == 0) {
- __ fxch(st);
- } else {
- __ fxch(st);
- __ fxch(other_slot);
- __ fxch(st);
- }
- }
-}
-
-
-int LCodeGen::X87Stack::st2idx(int pos) {
- return stack_depth_ - pos - 1;
-}
-
-
-int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
- for (int i = 0; i < stack_depth_; i++) {
- if (stack_[i].is(reg)) return i;
- }
- UNREACHABLE();
- return -1;
-}
-
-
-bool LCodeGen::X87Stack::Contains(X87Register reg) {
- for (int i = 0; i < stack_depth_; i++) {
- if (stack_[i].is(reg)) return true;
- }
- return false;
-}
-
-
-void LCodeGen::X87Stack::Free(X87Register reg) {
- ASSERT(is_mutable_);
- ASSERT(Contains(reg));
- int i = ArrayIndex(reg);
- int st = st2idx(i);
- if (st > 0) {
- // keep track of how fstp(i) changes the order of elements
- int tos_i = st2idx(0);
- stack_[i] = stack_[tos_i];
- }
- pop();
- __ fstp(st);
-}
-
-
-void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
- if (x87_stack_.Contains(dst)) {
- x87_stack_.Fxch(dst);
- __ fstp(0);
- } else {
- x87_stack_.push(dst);
- }
- X87Fld(src, opts);
-}
-
-
-void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
- ASSERT(!src.is_reg_only());
- switch (opts) {
- case kX87DoubleOperand:
- __ fld_d(src);
- break;
- case kX87FloatOperand:
- __ fld_s(src);
- break;
- case kX87IntOperand:
- __ fild_s(src);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
- ASSERT(!dst.is_reg_only());
- x87_stack_.Fxch(src);
- switch (opts) {
- case kX87DoubleOperand:
- __ fst_d(dst);
- break;
- case kX87IntOperand:
- __ fist_s(dst);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
- ASSERT(is_mutable_);
- if (Contains(reg)) {
- Free(reg);
- }
- // Mark this register as the next register to write to
- stack_[stack_depth_] = reg;
-}
-
-
-void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
- ASSERT(is_mutable_);
- // Assert the reg is prepared to write, but not on the virtual stack yet
- ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) &&
- stack_depth_ < X87Register::kNumAllocatableRegisters);
- stack_depth_++;
-}
-
-
-void LCodeGen::X87PrepareBinaryOp(
- X87Register left, X87Register right, X87Register result) {
- // You need to use DefineSameAsFirst for x87 instructions
- ASSERT(result.is(left));
- x87_stack_.Fxch(right, 1);
- x87_stack_.Fxch(left);
-}
-
-
-void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
- if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) {
- bool double_inputs = instr->HasDoubleRegisterInput();
-
- // Flush stack from tos down, since FreeX87() will mess with tos
- for (int i = stack_depth_-1; i >= 0; i--) {
- X87Register reg = stack_[i];
- // Skip registers which contain the inputs for the next instruction
- // when flushing the stack
- if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
- continue;
- }
- Free(reg);
- if (i < stack_depth_-1) i++;
- }
- }
- if (instr->IsReturn()) {
- while (stack_depth_ > 0) {
- __ fstp(0);
- stack_depth_--;
- }
- if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
- }
-}
-
-
-void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
- ASSERT(stack_depth_ <= 1);
- // If ever used for new stubs producing two pairs of doubles joined into two
- // phis this assert hits. That situation is not handled, since the two stacks
- // might have st0 and st1 swapped.
- if (current_block_id + 1 != goto_instr->block_id()) {
- // If we have a value on the x87 stack on leaving a block, it must be a
- // phi input. If the next block we compile is not the join block, we have
- // to discard the stack state.
- stack_depth_ = 0;
- }
-}
-
-
-void LCodeGen::EmitFlushX87ForDeopt() {
- // The deoptimizer does not support X87 Registers. But as long as we
- // deopt from a stub its not a problem, since we will re-materialize the
- // original stub inputs, which can't be double registers.
- ASSERT(info()->IsStub());
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ pushfd();
- __ VerifyX87StackDepth(x87_stack_.depth());
- __ popfd();
- }
- for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
-}
-
-
Register LCodeGen::ToRegister(LOperand* op) const {
ASSERT(op->IsRegister());
return ToRegister(op->index());
}
-X87Register LCodeGen::ToX87Register(LOperand* op) const {
- ASSERT(op->IsDoubleRegister());
- return ToX87Register(op->index());
-}
-
-
XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
ASSERT(op->IsDoubleRegister());
return ToDoubleRegister(op->index());
@@ -1092,17 +844,6 @@ void LCodeGen::DeoptimizeIf(Condition cc,
__ popfd();
}
- // Before Instructions which can deopt, we normally flush the x87 stack. But
- // we can have inputs or outputs of the current instruction on the stack,
- // thus we need to flush them here from the physical stack to leave it in a
- // consistent state.
- if (x87_stack_.depth() > 0) {
- Label done;
- if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
- EmitFlushX87ForDeopt();
- __ bind(&done);
- }
-
if (info()->ShouldTrapOnDeopt()) {
Label done;
if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
@@ -1963,41 +1704,32 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
ASSERT(instr->result()->IsDoubleRegister());
- if (!CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
- __ push(Immediate(upper));
- __ push(Immediate(lower));
- X87Register reg = ToX87Register(instr->result());
- X87Mov(reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
+ XMMRegister res = ToDoubleRegister(instr->result());
+ if (int_val == 0) {
+ __ xorps(res, res);
} else {
- CpuFeatureScope scope1(masm(), SSE2);
- XMMRegister res = ToDoubleRegister(instr->result());
- if (int_val == 0) {
- __ xorps(res, res);
- } else {
- Register temp = ToRegister(instr->temp());
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope scope2(masm(), SSE4_1);
- if (lower != 0) {
- __ Move(temp, Immediate(lower));
- __ movd(res, Operand(temp));
- __ Move(temp, Immediate(upper));
- __ pinsrd(res, Operand(temp), 1);
- } else {
- __ xorps(res, res);
- __ Move(temp, Immediate(upper));
- __ pinsrd(res, Operand(temp), 1);
- }
+ Register temp = ToRegister(instr->temp());
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope scope2(masm(), SSE4_1);
+ if (lower != 0) {
+ __ Move(temp, Immediate(lower));
+ __ movd(res, Operand(temp));
+ __ Move(temp, Immediate(upper));
+ __ pinsrd(res, Operand(temp), 1);
} else {
+ __ xorps(res, res);
+ __ Move(temp, Immediate(upper));
- __ movd(res, Operand(temp));
- __ psllq(res, 32);
- if (lower != 0) {
- XMMRegister xmm_scratch = double_scratch0();
- __ Move(temp, Immediate(lower));
- __ movd(xmm_scratch, Operand(temp));
- __ orps(res, xmm_scratch);
- }
+ __ pinsrd(res, Operand(temp), 1);
+ }
+ } else {
+ __ Move(temp, Immediate(upper));
+ __ movd(res, Operand(temp));
+ __ psllq(res, 32);
+ if (lower != 0) {
+ XMMRegister xmm_scratch = double_scratch0();
+ __ Move(temp, Immediate(lower));
+ __ movd(xmm_scratch, Operand(temp));
+ __ orps(res, xmm_scratch);
}
}
}
@@ -2180,7 +1912,6 @@ void LCodeGen::DoAddI(LAddI* instr) {
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- CpuFeatureScope scope(masm(), SSE2);
LOperand* left = instr->left();
LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
@@ -2243,88 +1974,45 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister left = ToDoubleRegister(instr->left());
- XMMRegister right = ToDoubleRegister(instr->right());
- XMMRegister result = ToDoubleRegister(instr->result());
- switch (instr->op()) {
- case Token::ADD:
- __ addsd(left, right);
- break;
- case Token::SUB:
- __ subsd(left, right);
- break;
- case Token::MUL:
- __ mulsd(left, right);
- break;
- case Token::DIV:
- __ divsd(left, right);
- // Don't delete this mov. It may improve performance on some CPUs,
- // when there is a mulsd depending on the result
- __ movaps(left, left);
- break;
- case Token::MOD: {
- // Pass two doubles as arguments on the stack.
- __ PrepareCallCFunction(4, eax);
- __ movsd(Operand(esp, 0 * kDoubleSize), left);
- __ movsd(Operand(esp, 1 * kDoubleSize), right);
- __ CallCFunction(
- ExternalReference::mod_two_doubles_operation(isolate()),
- 4);
-
- // Return value is in st(0) on ia32.
- // Store it into the result register.
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movsd(result, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- } else {
- X87Register left = ToX87Register(instr->left());
- X87Register right = ToX87Register(instr->right());
- X87Register result = ToX87Register(instr->result());
- if (instr->op() != Token::MOD) {
- X87PrepareBinaryOp(left, right, result);
- }
- switch (instr->op()) {
- case Token::ADD:
- __ fadd_i(1);
- break;
- case Token::SUB:
- __ fsub_i(1);
- break;
- case Token::MUL:
- __ fmul_i(1);
- break;
- case Token::DIV:
- __ fdiv_i(1);
- break;
- case Token::MOD: {
- // Pass two doubles as arguments on the stack.
- __ PrepareCallCFunction(4, eax);
- X87Mov(Operand(esp, 1 * kDoubleSize), right);
- X87Mov(Operand(esp, 0), left);
- X87Free(right);
- ASSERT(left.is(result));
- X87PrepareToWrite(result);
- __ CallCFunction(
- ExternalReference::mod_two_doubles_operation(isolate()),
- 4);
-
- // Return value is in st(0) on ia32.
- X87CommitWrite(result);
- break;
- }
- default:
- UNREACHABLE();
- break;
+ XMMRegister left = ToDoubleRegister(instr->left());
+ XMMRegister right = ToDoubleRegister(instr->right());
+ XMMRegister result = ToDoubleRegister(instr->result());
+ switch (instr->op()) {
+ case Token::ADD:
+ __ addsd(left, right);
+ break;
+ case Token::SUB:
+ __ subsd(left, right);
+ break;
+ case Token::MUL:
+ __ mulsd(left, right);
+ break;
+ case Token::DIV:
+ __ divsd(left, right);
+ // Don't delete this mov. It may improve performance on some CPUs,
+ // when there is a mulsd depending on the result
+ __ movaps(left, left);
+ break;
+ case Token::MOD: {
+ // Pass two doubles as arguments on the stack.
+ __ PrepareCallCFunction(4, eax);
+ __ movsd(Operand(esp, 0 * kDoubleSize), left);
+ __ movsd(Operand(esp, 1 * kDoubleSize), right);
+ __ CallCFunction(
+ ExternalReference::mod_two_doubles_operation(isolate()),
+ 4);
+
+ // Return value is in st(0) on ia32.
+ // Store it into the result register.
+ __ sub(Operand(esp), Immediate(kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ __ movsd(result, Operand(esp, 0));
+ __ add(Operand(esp), Immediate(kDoubleSize));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
}
@@ -2379,7 +2067,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
EmitBranch(instr, not_zero);
} else if (r.IsDouble()) {
ASSERT(!info()->IsStub());
- CpuFeatureScope scope(masm(), SSE2);
XMMRegister reg = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
@@ -2402,7 +2089,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
EmitBranch(instr, no_condition);
} else if (type.IsHeapNumber()) {
ASSERT(!info()->IsStub());
- CpuFeatureScope scope(masm(), SSE2);
XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
@@ -2488,16 +2174,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
factory()->heap_number_map());
__ j(not_equal, &not_heap_number, Label::kNear);
- if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
- } else {
- __ fldz();
- __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
- __ FCmp();
- }
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
__ j(zero, instr->FalseLabel(chunk_));
__ jmp(instr->TrueLabel(chunk_));
__ bind(&not_heap_number);
@@ -2520,10 +2199,6 @@ void LCodeGen::EmitGoto(int block) {
}
-void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
-}
-
-
void LCodeGen::DoGoto(LGoto* instr) {
EmitGoto(instr->block_id());
}
@@ -2575,13 +2250,7 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
EmitGoto(next_block);
} else {
if (instr->is_double()) {
- if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- } else {
- X87LoadForUsage(ToX87Register(right), ToX87Register(left));
- __ FCmp();
- }
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block.
__ j(parity_even, instr->FalseLabel(chunk_));
@@ -2625,35 +2294,12 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
return;
}
- bool use_sse2 = CpuFeatures::IsSupported(SSE2);
- if (use_sse2) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->object());
- __ ucomisd(input_reg, input_reg);
- EmitFalseBranch(instr, parity_odd);
- } else {
- // Put the value to the top of stack
- X87Register src = ToX87Register(instr->object());
- X87LoadForUsage(src);
- __ fld(0);
- __ fld(0);
- __ FCmp();
- Label ok;
- __ j(parity_even, &ok, Label::kNear);
- __ fstp(0);
- EmitFalseBranch(instr, no_condition);
- __ bind(&ok);
- }
-
+ XMMRegister input_reg = ToDoubleRegister(instr->object());
+ __ ucomisd(input_reg, input_reg);
+ EmitFalseBranch(instr, parity_odd);
__ sub(esp, Immediate(kDoubleSize));
- if (use_sse2) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->object());
- __ movsd(MemOperand(esp, 0), input_reg);
- } else {
- __ fstp_d(MemOperand(esp, 0));
- }
+ __ movsd(MemOperand(esp, 0), input_reg);
__ add(esp, Immediate(kDoubleSize));
int offset = sizeof(kHoleNanUpper32);
@@ -2668,7 +2314,6 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
Register scratch = ToRegister(instr->temp());
if (rep.IsDouble()) {
- CpuFeatureScope use_sse2(masm(), SSE2);
XMMRegister value = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
@@ -2971,9 +2616,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ LInstanceOfKnownGlobal* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
@@ -2985,7 +2629,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
};
DeferredInstanceOfKnownGlobal* deferred;
- deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);
+ deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
Register object = ToRegister(instr->value());
@@ -3134,9 +2778,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
- RestoreCallerDoubles();
- }
+ if (info()->saves_caller_doubles()) RestoreCallerDoubles();
if (dynamic_frame_alignment_) {
// Fetch the state of the dynamic frame alignment.
__ mov(edx, Operand(ebp,
@@ -3251,7 +2893,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
offset,
value,
temp,
- GetSaveFPRegsMode(isolate()),
+ kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
}
@@ -3276,13 +2918,8 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register object = ToRegister(instr->object());
if (instr->hydrogen()->representation().IsDouble()) {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister result = ToDoubleRegister(instr->result());
- __ movsd(result, FieldOperand(object, offset));
- } else {
- X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
- }
+ XMMRegister result = ToDoubleRegister(instr->result());
+ __ movsd(result, FieldOperand(object, offset));
return;
}
@@ -3409,22 +3046,12 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
elements_kind == FLOAT32_ELEMENTS) {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, operand);
- __ cvtss2sd(result, result);
- } else {
- X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
- }
+ XMMRegister result(ToDoubleRegister(instr->result()));
+ __ movss(result, operand);
+ __ cvtss2sd(result, result);
} else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- __ movsd(ToDoubleRegister(instr->result()), operand);
- } else {
- X87Mov(ToX87Register(instr->result()), operand);
- }
+ __ movsd(ToDoubleRegister(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
@@ -3498,13 +3125,8 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister result = ToDoubleRegister(instr->result());
- __ movsd(result, double_load_operand);
- } else {
- X87Mov(ToX87Register(instr->result()), double_load_operand);
- }
+ XMMRegister result = ToDoubleRegister(instr->result());
+ __ movsd(result, double_load_operand);
}
@@ -3926,9 +3548,8 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LMathAbs* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ LMathAbs* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
@@ -3940,7 +3561,6 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
ASSERT(instr->value()->Equals(instr->result()));
Representation r = instr->hydrogen()->value()->representation();
- CpuFeatureScope scope(masm(), SSE2);
if (r.IsDouble()) {
XMMRegister scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3951,7 +3571,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
EmitIntegerMathAbs(instr);
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
- new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
+ new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
Register input_reg = ToRegister(instr->value());
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
@@ -3962,7 +3582,6 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
void LCodeGen::DoMathFloor(LMathFloor* instr) {
- CpuFeatureScope scope(masm(), SSE2);
XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -4028,7 +3647,6 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
void LCodeGen::DoMathRound(LMathRound* instr) {
- CpuFeatureScope scope(masm(), SSE2);
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0();
@@ -4091,7 +3709,6 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
- CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
__ sqrtsd(input_reg, input_reg);
@@ -4099,7 +3716,6 @@ void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- CpuFeatureScope scope(masm(), SSE2);
XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = ToRegister(instr->temp());
@@ -4167,7 +3783,6 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoMathLog(LMathLog* instr) {
- CpuFeatureScope scope(masm(), SSE2);
ASSERT(instr->value()->Equals(instr->result()));
XMMRegister input_reg = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0();
@@ -4199,7 +3814,6 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
void LCodeGen::DoMathClz32(LMathClz32* instr) {
- CpuFeatureScope scope(masm(), SSE2);
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Label not_zero_input;
@@ -4214,7 +3828,6 @@ void LCodeGen::DoMathClz32(LMathClz32* instr) {
void LCodeGen::DoMathExp(LMathExp* instr) {
- CpuFeatureScope scope(masm(), SSE2);
XMMRegister input = ToDoubleRegister(instr->value());
XMMRegister result = ToDoubleRegister(instr->result());
XMMRegister temp0 = double_scratch0();
@@ -4393,14 +4006,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
ASSERT(access.IsInobject());
ASSERT(!instr->hydrogen()->has_transition());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister value = ToDoubleRegister(instr->value());
- __ movsd(FieldOperand(object, offset), value);
- } else {
- X87Register value = ToX87Register(instr->value());
- X87Mov(FieldOperand(object, offset), value);
- }
+ XMMRegister value = ToDoubleRegister(instr->value());
+ __ movsd(FieldOperand(object, offset), value);
return;
}
@@ -4419,7 +4026,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
HeapObject::kMapOffset,
temp_map,
temp,
- GetSaveFPRegsMode(isolate()),
+ kSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
@@ -4460,7 +4067,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
offset,
value,
temp,
- GetSaveFPRegsMode(isolate()),
+ kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
}
@@ -4520,23 +4127,12 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
elements_kind == FLOAT32_ELEMENTS) {
- if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister xmm_scratch = double_scratch0();
- __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
- __ movss(operand, xmm_scratch);
- } else {
- __ fld(0);
- __ fstp_s(operand);
- }
+ XMMRegister xmm_scratch = double_scratch0();
+ __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
+ __ movss(operand, xmm_scratch);
} else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
- if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- __ movsd(operand, ToDoubleRegister(instr->value()));
- } else {
- X87Mov(operand, ToX87Register(instr->value()));
- }
+ __ movsd(operand, ToDoubleRegister(instr->value()));
} else {
Register value = ToRegister(instr->value());
switch (elements_kind) {
@@ -4590,68 +4186,19 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
- if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister value = ToDoubleRegister(instr->value());
+ XMMRegister value = ToDoubleRegister(instr->value());
- if (instr->NeedsCanonicalization()) {
- Label have_value;
+ if (instr->NeedsCanonicalization()) {
+ Label have_value;
- __ ucomisd(value, value);
- __ j(parity_odd, &have_value, Label::kNear); // NaN.
+ __ ucomisd(value, value);
+ __ j(parity_odd, &have_value, Label::kNear); // NaN.
- __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
- __ bind(&have_value);
- }
-
- __ movsd(double_store_operand, value);
- } else {
- // Can't use SSE2 in the serializer
- if (instr->hydrogen()->IsConstantHoleStore()) {
- // This means we should store the (double) hole. No floating point
- // registers required.
- double nan_double = FixedDoubleArray::hole_nan_as_double();
- uint64_t int_val = BitCast<uint64_t, double>(nan_double);
- int32_t lower = static_cast<int32_t>(int_val);
- int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
-
- __ mov(double_store_operand, Immediate(lower));
- Operand double_store_operand2 = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag + kPointerSize,
- instr->additional_index());
- __ mov(double_store_operand2, Immediate(upper));
- } else {
- Label no_special_nan_handling;
- X87Register value = ToX87Register(instr->value());
- X87Fxch(value);
-
- if (instr->NeedsCanonicalization()) {
- __ fld(0);
- __ fld(0);
- __ FCmp();
-
- __ j(parity_odd, &no_special_nan_handling, Label::kNear);
- __ sub(esp, Immediate(kDoubleSize));
- __ fst_d(MemOperand(esp, 0));
- __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
- Immediate(kHoleNanUpper32));
- __ add(esp, Immediate(kDoubleSize));
- Label canonicalize;
- __ j(not_equal, &canonicalize, Label::kNear);
- __ jmp(&no_special_nan_handling, Label::kNear);
- __ bind(&canonicalize);
- __ fstp(0);
- __ fld_d(Operand::StaticVariable(canonical_nan_reference));
- }
-
- __ bind(&no_special_nan_handling);
- __ fst_d(double_store_operand);
- }
+ __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
+ __ bind(&have_value);
}
+
+ __ movsd(double_store_operand, value);
}
@@ -4692,7 +4239,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
__ RecordWrite(elements,
key,
value,
- GetSaveFPRegsMode(isolate()),
+ kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
}
@@ -4786,9 +4333,8 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen,
- LStringCharCodeAt* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredStringCharCodeAt(instr_);
}
@@ -4798,7 +4344,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
};
DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
+ new(zone()) DeferredStringCharCodeAt(this, instr);
StringCharLoadGenerator::Generate(masm(),
factory(),
@@ -4845,9 +4391,8 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen,
- LStringCharFromCode* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ LStringCharFromCode* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredStringCharFromCode(instr_);
}
@@ -4857,7 +4402,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
};
DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);
+ new(zone()) DeferredStringCharFromCode(this, instr);
ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister(instr->char_code());
@@ -4909,36 +4454,17 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
LOperand* output = instr->result();
ASSERT(input->IsRegister() || input->IsStackSlot());
ASSERT(output->IsDoubleRegister());
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
- } else if (input->IsRegister()) {
- Register input_reg = ToRegister(input);
- __ push(input_reg);
- X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
- __ pop(input_reg);
- } else {
- X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
- }
+ __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
}
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- LOperand* temp = instr->temp();
-
- __ LoadUint32(ToDoubleRegister(output),
- ToRegister(input),
- ToDoubleRegister(temp));
- } else {
- X87Register res = ToX87Register(output);
- X87PrepareToWrite(res);
- __ LoadUint32NoSSE2(ToRegister(input));
- X87CommitWrite(res);
- }
+ LOperand* temp = instr->temp();
+ __ LoadUint32(ToDoubleRegister(output),
+ ToRegister(input),
+ ToDoubleRegister(temp));
}
@@ -4946,9 +4472,8 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen,
- LNumberTagI* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ LNumberTagI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
NULL, SIGNED_INT32);
@@ -4963,7 +4488,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
Register reg = ToRegister(input);
DeferredNumberTagI* deferred =
- new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
+ new(zone()) DeferredNumberTagI(this, instr);
__ SmiTag(reg);
__ j(overflow, deferred->entry());
__ bind(deferred->exit());
@@ -4973,10 +4498,8 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
class DeferredNumberTagU V8_FINAL : public LDeferredCode {
public:
- DeferredNumberTagU(LCodeGen* codegen,
- LNumberTagU* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
instr_->temp2(), UNSIGNED_INT32);
@@ -4991,7 +4514,7 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
Register reg = ToRegister(input);
DeferredNumberTagU* deferred =
- new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
+ new(zone()) DeferredNumberTagU(this, instr);
__ cmp(reg, Immediate(Smi::kMaxValue));
__ j(above, deferred->entry());
__ SmiTag(reg);
@@ -5015,27 +4538,9 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
// the value in there. If that fails, call the runtime system.
__ SmiUntag(reg);
__ xor_(reg, 0x80000000);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope feature_scope(masm(), SSE2);
- __ Cvtsi2sd(xmm_scratch, Operand(reg));
- } else {
- __ push(reg);
- __ fild_s(Operand(esp, 0));
- __ pop(reg);
- }
+ __ Cvtsi2sd(xmm_scratch, Operand(reg));
} else {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope feature_scope(masm(), SSE2);
- __ LoadUint32(xmm_scratch, reg, ToDoubleRegister(temp2));
- } else {
- // There's no fild variant for unsigned values, so zero-extend to a 64-bit
- // int manually.
- __ push(Immediate(0));
- __ push(reg);
- __ fild_d(Operand(esp, 0));
- __ pop(reg);
- __ pop(reg);
- }
+ __ LoadUint32(xmm_scratch, reg, ToDoubleRegister(temp2));
}
if (FLAG_inline_new) {
@@ -5069,22 +4574,15 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
// Done. Put the value in xmm_scratch into the value of the allocated heap
// number.
__ bind(&done);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope feature_scope(masm(), SSE2);
- __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
- } else {
- __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
- }
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
}
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
class DeferredNumberTagD V8_FINAL : public LDeferredCode {
public:
- DeferredNumberTagD(LCodeGen* codegen,
- LNumberTagD* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagD(instr_);
}
@@ -5095,15 +4593,8 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Register reg = ToRegister(instr->result());
- bool use_sse2 = CpuFeatures::IsSupported(SSE2);
- if (!use_sse2) {
- // Put the value to the top of stack
- X87Register src = ToX87Register(instr->value());
- X87LoadForUsage(src);
- }
-
DeferredNumberTagD* deferred =
- new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
+ new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
Register tmp = ToRegister(instr->temp());
__ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
@@ -5111,13 +4602,8 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- if (use_sse2) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
- } else {
- __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
- }
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}
@@ -5172,76 +4658,6 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
}
-void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
- Register temp_reg,
- X87Register res_reg,
- bool can_convert_undefined_to_nan,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode) {
- Label load_smi, done;
-
- X87PrepareToWrite(res_reg);
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
- // Smi check.
- __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
-
- // Heap number map check.
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(not_equal, env);
- } else {
- Label heap_number, convert;
- __ j(equal, &heap_number, Label::kNear);
-
- // Convert undefined (or hole) to NaN.
- __ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, env);
-
- __ bind(&convert);
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ fld_d(Operand::StaticVariable(nan));
- __ jmp(&done, Label::kNear);
-
- __ bind(&heap_number);
- }
- // Heap number to x87 conversion.
- __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
- if (deoptimize_on_minus_zero) {
- __ fldz();
- __ FCmp();
- __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ j(not_zero, &done, Label::kNear);
-
- // Use general purpose registers to check if we have -0.0
- __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- __ test(temp_reg, Immediate(HeapNumber::kSignMask));
- __ j(zero, &done, Label::kNear);
-
- // Pop FPU stack before deoptimizing.
- __ fstp(0);
- DeoptimizeIf(not_zero, env);
- }
- __ jmp(&done, Label::kNear);
- } else {
- ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
- }
-
- __ bind(&load_smi);
- // Clobbering a temp is faster than re-tagging the
- // input register since we avoid dependencies.
- __ mov(temp_reg, input_reg);
- __ SmiUntag(temp_reg); // Untag smi before converting to float.
- __ push(temp_reg);
- __ fild_s(Operand(esp, 0));
- __ add(esp, Immediate(kPointerSize));
- __ bind(&done);
- X87CommitWrite(res_reg);
-}
-
-
void LCodeGen::EmitNumberUntagD(Register input_reg,
Register temp_reg,
XMMRegister result_reg,
@@ -5357,10 +4773,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
class DeferredTaggedToI V8_FINAL : public LDeferredCode {
public:
- DeferredTaggedToI(LCodeGen* codegen,
- LTaggedToI* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredTaggedToI(instr_, done());
}
@@ -5378,7 +4792,7 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
__ SmiUntag(input_reg);
} else {
DeferredTaggedToI* deferred =
- new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
+ new(zone()) DeferredTaggedToI(this, instr);
// Optimistically untag the input.
// If the input is a HeapObject, SmiUntag will set the carry flag.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
@@ -5408,25 +4822,14 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
NumberUntagDMode mode = value->representation().IsSmi()
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister result_reg = ToDoubleRegister(result);
- EmitNumberUntagD(input_reg,
- temp_reg,
- result_reg,
- instr->hydrogen()->can_convert_undefined_to_nan(),
- deoptimize_on_minus_zero,
- instr->environment(),
- mode);
- } else {
- EmitNumberUntagDNoSSE2(input_reg,
- temp_reg,
- ToX87Register(instr->result()),
- instr->hydrogen()->can_convert_undefined_to_nan(),
- deoptimize_on_minus_zero,
- instr->environment(),
- mode);
- }
+ XMMRegister result_reg = ToDoubleRegister(result);
+ EmitNumberUntagD(input_reg,
+ temp_reg,
+ result_reg,
+ instr->hydrogen()->can_convert_undefined_to_nan(),
+ deoptimize_on_minus_zero,
+ instr->environment(),
+ mode);
}
@@ -5438,29 +4841,14 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register result_reg = ToRegister(result);
if (instr->truncating()) {
- if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister input_reg = ToDoubleRegister(input);
- __ TruncateDoubleToI(result_reg, input_reg);
- } else {
- X87Register input_reg = ToX87Register(input);
- X87Fxch(input_reg);
- __ TruncateX87TOSToI(result_reg);
- }
+ XMMRegister input_reg = ToDoubleRegister(input);
+ __ TruncateDoubleToI(result_reg, input_reg);
} else {
Label bailout, done;
- if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister input_reg = ToDoubleRegister(input);
- XMMRegister xmm_scratch = double_scratch0();
- __ DoubleToI(result_reg, input_reg, xmm_scratch,
- instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
- } else {
- X87Register input_reg = ToX87Register(input);
- X87Fxch(input_reg);
- __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
- &bailout, Label::kNear);
- }
+ XMMRegister input_reg = ToDoubleRegister(input);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
__ jmp(&done, Label::kNear);
__ bind(&bailout);
DeoptimizeIf(no_condition, instr->environment());
@@ -5477,18 +4865,10 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
Register result_reg = ToRegister(result);
Label bailout, done;
- if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister input_reg = ToDoubleRegister(input);
- XMMRegister xmm_scratch = double_scratch0();
- __ DoubleToI(result_reg, input_reg, xmm_scratch,
- instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
- } else {
- X87Register input_reg = ToX87Register(input);
- X87Fxch(input_reg);
- __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
- &bailout, Label::kNear);
- }
+ XMMRegister input_reg = ToDoubleRegister(input);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
__ jmp(&done, Label::kNear);
__ bind(&bailout);
DeoptimizeIf(no_condition, instr->environment());
@@ -5592,11 +4972,8 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
class DeferredCheckMaps V8_FINAL : public LDeferredCode {
public:
- DeferredCheckMaps(LCodeGen* codegen,
- LCheckMaps* instr,
- Register object,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
+ DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+ : LDeferredCode(codegen), instr_(instr), object_(object) {
SetExit(check_maps());
}
virtual void Generate() V8_OVERRIDE {
@@ -5624,7 +5001,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
DeferredCheckMaps* deferred = NULL;
if (instr->hydrogen()->HasMigrationTarget()) {
- deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
+ deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
__ bind(deferred->check_maps());
}
@@ -5649,7 +5026,6 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- CpuFeatureScope scope(masm(), SSE2);
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
XMMRegister xmm_scratch = double_scratch0();
Register result_reg = ToRegister(instr->result());
@@ -5665,8 +5041,6 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- CpuFeatureScope scope(masm(), SSE2);
-
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
@@ -5701,130 +5075,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
-void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
- Register input_reg = ToRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- Register scratch = ToRegister(instr->scratch());
- Register scratch2 = ToRegister(instr->scratch2());
- Register scratch3 = ToRegister(instr->scratch3());
- Label is_smi, done, heap_number, valid_exponent,
- largest_value, zero_result, maybe_nan_or_infinity;
-
- __ JumpIfSmi(input_reg, &is_smi);
-
- // Check for heap number
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
-
- // Check for undefined. Undefined is converted to zero for clamping
- // conversions.
- __ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr->environment());
- __ jmp(&zero_result, Label::kNear);
-
- // Heap number
- __ bind(&heap_number);
-
- // Surprisingly, all of the hand-crafted bit-manipulations below are much
- // faster than the x86 FPU built-in instruction, especially since "banker's
- // rounding" would be additionally very expensive
-
- // Get exponent word.
- __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
-
- // Test for negative values --> clamp to zero
- __ test(scratch, scratch);
- __ j(negative, &zero_result, Label::kNear);
-
- // Get exponent alone in scratch2.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kExponentMask);
- __ shr(scratch2, HeapNumber::kExponentShift);
- __ j(zero, &zero_result, Label::kNear);
- __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
- __ j(negative, &zero_result, Label::kNear);
-
- const uint32_t non_int8_exponent = 7;
- __ cmp(scratch2, Immediate(non_int8_exponent + 1));
- // If the exponent is too big, check for special values.
- __ j(greater, &maybe_nan_or_infinity, Label::kNear);
-
- __ bind(&valid_exponent);
- // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
- // < 7. The shift bias is the number of bits to shift the mantissa such that
- // with an exponent of 7 such the that top-most one is in bit 30, allowing
- // detection the rounding overflow of a 255.5 to 256 (bit 31 goes from 0 to
- // 1).
- int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
- __ lea(result_reg, MemOperand(scratch2, shift_bias));
- // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
- // top bits of the mantissa.
- __ and_(scratch, HeapNumber::kMantissaMask);
- // Put back the implicit 1 of the mantissa
- __ or_(scratch, 1 << HeapNumber::kExponentShift);
- // Shift up to round
- __ shl_cl(scratch);
- // Use "banker's rounding" to spec: If fractional part of number is 0.5, then
- // use the bit in the "ones" place and add it to the "halves" place, which has
- // the effect of rounding to even.
- __ mov(scratch2, scratch);
- const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
- const uint32_t one_bit_shift = one_half_bit_shift + 1;
- __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
- __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
- Label no_round;
- __ j(less, &no_round, Label::kNear);
- Label round_up;
- __ mov(scratch2, Immediate(1 << one_half_bit_shift));
- __ j(greater, &round_up, Label::kNear);
- __ test(scratch3, scratch3);
- __ j(not_zero, &round_up, Label::kNear);
- __ mov(scratch2, scratch);
- __ and_(scratch2, Immediate(1 << one_bit_shift));
- __ shr(scratch2, 1);
- __ bind(&round_up);
- __ add(scratch, scratch2);
- __ j(overflow, &largest_value, Label::kNear);
- __ bind(&no_round);
- __ shr(scratch, 23);
- __ mov(result_reg, scratch);
- __ jmp(&done, Label::kNear);
-
- __ bind(&maybe_nan_or_infinity);
- // Check for NaN/Infinity, all other values map to 255
- __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
- __ j(not_equal, &largest_value, Label::kNear);
-
- // Check for NaN, which differs from Infinity in that at least one mantissa
- // bit is set.
- __ and_(scratch, HeapNumber::kMantissaMask);
- __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
- __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN
- // Infinity -> Fall through to map to 255.
-
- __ bind(&largest_value);
- __ mov(result_reg, Immediate(255));
- __ jmp(&done, Label::kNear);
-
- __ bind(&zero_result);
- __ xor_(result_reg, result_reg);
- __ jmp(&done, Label::kNear);
-
- // smi
- __ bind(&is_smi);
- if (!input_reg.is(result_reg)) {
- __ mov(result_reg, input_reg);
- }
- __ SmiUntag(result_reg);
- __ ClampUint8(result_reg);
- __ bind(&done);
-}
-
-
void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
- CpuFeatureScope scope(masm(), SSE2);
XMMRegister value_reg = ToDoubleRegister(instr->value());
Register result_reg = ToRegister(instr->result());
if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
@@ -5846,7 +5097,6 @@ void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
Register hi_reg = ToRegister(instr->hi());
Register lo_reg = ToRegister(instr->lo());
XMMRegister result_reg = ToDoubleRegister(instr->result());
- CpuFeatureScope scope(masm(), SSE2);
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope scope2(masm(), SSE4_1);
@@ -5865,10 +5115,8 @@ void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
- DeferredAllocate(LCodeGen* codegen,
- LAllocate* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredAllocate(instr_);
}
@@ -5877,8 +5125,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
LAllocate* instr_;
};
- DeferredAllocate* deferred =
- new(zone()) DeferredAllocate(this, instr, x87_stack_);
+ DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
Register result = ToRegister(instr->result());
Register temp = ToRegister(instr->temp());
@@ -6243,10 +5490,8 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
class DeferredStackCheck V8_FINAL : public LDeferredCode {
public:
- DeferredStackCheck(LCodeGen* codegen,
- LStackCheck* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredStackCheck(instr_);
}
@@ -6277,7 +5522,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr, x87_stack_);
+ new(zone()) DeferredStackCheck(this, instr);
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
@@ -6391,9 +5636,8 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
DeferredLoadMutableDouble(LCodeGen* codegen,
LLoadFieldByIndex* instr,
Register object,
- Register index,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack),
+ Register index)
+ : LDeferredCode(codegen),
instr_(instr),
object_(object),
index_(index) {
@@ -6413,7 +5657,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
DeferredLoadMutableDouble* deferred;
deferred = new(zone()) DeferredLoadMutableDouble(
- this, instr, object, index, x87_stack_);
+ this, instr, object, index);
Label out_of_object, done;
__ test(index, Immediate(Smi::FromInt(1)));