| Index: src/x87/lithium-codegen-x87.cc
|
| diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/x87/lithium-codegen-x87.cc
|
| similarity index 87%
|
| copy from src/ia32/lithium-codegen-ia32.cc
|
| copy to src/x87/lithium-codegen-x87.cc
|
| index 1d97d8a91f255b4ca5c085a647522e2eef99a31e..8b370710fd88d5868be904272fbff9c1f590cb53 100644
|
| --- a/src/ia32/lithium-codegen-ia32.cc
|
| +++ b/src/x87/lithium-codegen-x87.cc
|
| @@ -4,9 +4,9 @@
|
|
|
| #include "v8.h"
|
|
|
| -#if V8_TARGET_ARCH_IA32
|
| +#if V8_TARGET_ARCH_X87
|
|
|
| -#include "ia32/lithium-codegen-ia32.h"
|
| +#include "x87/lithium-codegen-x87.h"
|
| #include "ic.h"
|
| #include "code-stubs.h"
|
| #include "deoptimizer.h"
|
| @@ -17,6 +17,7 @@
|
| namespace v8 {
|
| namespace internal {
|
|
|
| +
|
| // When invoking builtins, we need to record the safepoint in the middle of
|
| // the invoke instruction sequence generated by the macro assembler.
|
| class SafepointGenerator V8_FINAL : public CallWrapper {
|
| @@ -91,38 +92,6 @@ void LCodeGen::MakeSureStackPagesMapped(int offset) {
|
| #endif
|
|
|
|
|
| -void LCodeGen::SaveCallerDoubles() {
|
| - ASSERT(info()->saves_caller_doubles());
|
| - ASSERT(NeedsEagerFrame());
|
| - Comment(";;; Save clobbered callee double registers");
|
| - int count = 0;
|
| - BitVector* doubles = chunk()->allocated_double_registers();
|
| - BitVector::Iterator save_iterator(doubles);
|
| - while (!save_iterator.Done()) {
|
| - __ movsd(MemOperand(esp, count * kDoubleSize),
|
| - XMMRegister::FromAllocationIndex(save_iterator.Current()));
|
| - save_iterator.Advance();
|
| - count++;
|
| - }
|
| -}
|
| -
|
| -
|
| -void LCodeGen::RestoreCallerDoubles() {
|
| - ASSERT(info()->saves_caller_doubles());
|
| - ASSERT(NeedsEagerFrame());
|
| - Comment(";;; Restore clobbered callee double registers");
|
| - BitVector* doubles = chunk()->allocated_double_registers();
|
| - BitVector::Iterator save_iterator(doubles);
|
| - int count = 0;
|
| - while (!save_iterator.Done()) {
|
| - __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
|
| - MemOperand(esp, count * kDoubleSize));
|
| - save_iterator.Advance();
|
| - count++;
|
| - }
|
| -}
|
| -
|
| -
|
| bool LCodeGen::GeneratePrologue() {
|
| ASSERT(is_generating());
|
|
|
| @@ -246,8 +215,6 @@ bool LCodeGen::GeneratePrologue() {
|
| }
|
| }
|
| }
|
| -
|
| - if (info()->saves_caller_doubles()) SaveCallerDoubles();
|
| }
|
|
|
| // Possibly allocate a local context.
|
| @@ -284,8 +251,7 @@ bool LCodeGen::GeneratePrologue() {
|
| __ RecordWriteContextSlot(esi,
|
| context_offset,
|
| eax,
|
| - ebx,
|
| - kDontSaveFPRegs);
|
| + ebx);
|
| }
|
| }
|
| Comment(";;; End allocate local context");
|
| @@ -359,10 +325,25 @@ void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
|
| if (!instr->IsLazyBailout() && !instr->IsGap()) {
|
| safepoints_.BumpLastLazySafepointIndex();
|
| }
|
| + FlushX87StackIfNecessary(instr);
|
| }
|
|
|
|
|
| -void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { }
|
| +void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
|
| + if (instr->IsGoto()) {
|
| + x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
|
| + } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
|
| + !instr->IsGap() && !instr->IsReturn()) {
|
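| + // An instruction that clobbers double registers must leave the x87
|
| + // stack empty, apart from its own double result on top.
|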
| + if (instr->ClobbersDoubleRegisters(isolate())) {
|
| + if (instr->HasDoubleRegisterResult()) {
|
| + ASSERT_EQ(1, x87_stack_.depth());
|
| + } else {
|
| + ASSERT_EQ(0, x87_stack_.depth());
|
| + }
|
| + }
|
| + __ VerifyX87StackDepth(x87_stack_.depth());
|
| + }
|
| +}
|
|
|
|
|
| bool LCodeGen::GenerateJumpTable() {
|
| @@ -408,7 +389,6 @@ bool LCodeGen::GenerateJumpTable() {
|
| __ ret(0); // Call the continuation without clobbering registers.
|
| }
|
| } else {
|
| - if (info()->saves_caller_doubles()) RestoreCallerDoubles();
|
| __ call(entry, RelocInfo::RUNTIME_ENTRY);
|
| }
|
| }
|
| @@ -421,6 +401,8 @@ bool LCodeGen::GenerateDeferredCode() {
|
| if (deferred_.length() > 0) {
|
| for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
|
| LDeferredCode* code = deferred_[i];
|
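| + // Restore the virtual x87 stack state that was live at the point the
|
| + // deferred code was branched to.
|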
| + X87Stack copy(code->x87_stack());
|
| + x87_stack_ = copy;
|
|
|
| HValue* value =
|
| instructions_->at(code->instruction_index())->hydrogen_value();
|
| @@ -486,8 +468,212 @@ Register LCodeGen::ToRegister(int index) const {
|
| }
|
|
|
|
|
| -XMMRegister LCodeGen::ToDoubleRegister(int index) const {
|
| - return XMMRegister::FromAllocationIndex(index);
|
| +X87Register LCodeGen::ToX87Register(int index) const {
|
| + return X87Register::FromAllocationIndex(index);
|
| +}
|
| +
|
| +
|
| +void LCodeGen::X87LoadForUsage(X87Register reg) {
|
| + ASSERT(x87_stack_.Contains(reg));
|
| + x87_stack_.Fxch(reg);
|
| + x87_stack_.pop();
|
| +}
|
| +
|
| +
|
| +void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
|
| + ASSERT(x87_stack_.Contains(reg1));
|
| + ASSERT(x87_stack_.Contains(reg2));
|
| + x87_stack_.Fxch(reg1, 1);
|
| + x87_stack_.Fxch(reg2);
|
| + x87_stack_.pop();
|
| + x87_stack_.pop();
|
| +}
|
| +
|
| +
|
| +void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
|
| + ASSERT(is_mutable_);
|
| + ASSERT(Contains(reg) && stack_depth_ > other_slot);
|
| + int i = ArrayIndex(reg);
|
| + int st = st2idx(i);
|
| + if (st != other_slot) {
|
| + int other_i = st2idx(other_slot);
|
| + X87Register other = stack_[other_i];
|
| + stack_[other_i] = reg;
|
| + stack_[i] = other;
|
| + if (st == 0) {
|
| + __ fxch(other_slot);
|
| + } else if (other_slot == 0) {
|
| + __ fxch(st);
|
| + } else {
|
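| + // Neither slot is st(0): route the exchange through the top of the
|
| + // stack with three fxch instructions.
|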
| + __ fxch(st);
|
| + __ fxch(other_slot);
|
| + __ fxch(st);
|
| + }
|
| + }
|
| +}
|
| +
|
| +
|
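| +// Translates between an index into stack_[] and an st(i) position. The
|
| +// mapping is its own inverse, so it converts in either direction.
|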
| +int LCodeGen::X87Stack::st2idx(int pos) {
|
| + return stack_depth_ - pos - 1;
|
| +}
|
| +
|
| +
|
| +int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
|
| + for (int i = 0; i < stack_depth_; i++) {
|
| + if (stack_[i].is(reg)) return i;
|
| + }
|
| + UNREACHABLE();
|
| + return -1;
|
| +}
|
| +
|
| +
|
| +bool LCodeGen::X87Stack::Contains(X87Register reg) {
|
| + for (int i = 0; i < stack_depth_; i++) {
|
| + if (stack_[i].is(reg)) return true;
|
| + }
|
| + return false;
|
| +}
|
| +
|
| +
|
| +void LCodeGen::X87Stack::Free(X87Register reg) {
|
| + ASSERT(is_mutable_);
|
| + ASSERT(Contains(reg));
|
| + int i = ArrayIndex(reg);
|
| + int st = st2idx(i);
|
| + if (st > 0) {
|
| + // Keep track of how fstp(st) changes the order of the elements.
|
| + int tos_i = st2idx(0);
|
| + stack_[i] = stack_[tos_i];
|
| + }
|
| + pop();
|
| + __ fstp(st);
|
| +}
|
| +
|
| +
|
| +void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
|
| + if (x87_stack_.Contains(dst)) {
|
| + x87_stack_.Fxch(dst);
|
| + __ fstp(0);
|
| + } else {
|
| + x87_stack_.push(dst);
|
| + }
|
| + X87Fld(src, opts);
|
| +}
|
| +
|
| +
|
| +void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
|
| + ASSERT(!src.is_reg_only());
|
| + switch (opts) {
|
| + case kX87DoubleOperand:
|
| + __ fld_d(src);
|
| + break;
|
| + case kX87FloatOperand:
|
| + __ fld_s(src);
|
| + break;
|
| + case kX87IntOperand:
|
| + __ fild_s(src);
|
| + break;
|
| + default:
|
| + UNREACHABLE();
|
| + }
|
| +}
|
| +
|
| +
|
| +void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
|
| + ASSERT(!dst.is_reg_only());
|
| + x87_stack_.Fxch(src);
|
| + switch (opts) {
|
| + case kX87DoubleOperand:
|
| + __ fst_d(dst);
|
| + break;
|
| + case kX87IntOperand:
|
| + __ fist_s(dst);
|
| + break;
|
| + default:
|
| + UNREACHABLE();
|
| + }
|
| +}
|
| +
|
| +
|
| +void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
|
| + ASSERT(is_mutable_);
|
| + if (Contains(reg)) {
|
| + Free(reg);
|
| + }
|
| + // Mark this register as the next register to write to
|
| + stack_[stack_depth_] = reg;
|
| +}
|
| +
|
| +
|
| +void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
|
| + ASSERT(is_mutable_);
|
| + // Assert that reg is prepared for writing but not yet on the virtual stack.
|
| + ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) &&
|
| + stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
|
| + stack_depth_++;
|
| +}
|
| +
|
| +
|
| +void LCodeGen::X87PrepareBinaryOp(
|
| + X87Register left, X87Register right, X87Register result) {
|
| + // You need to use DefineSameAsFirst for x87 instructions.
|
| + ASSERT(result.is(left));
|
| + x87_stack_.Fxch(right, 1);
|
| + x87_stack_.Fxch(left);
|
| +}
|
| +
|
| +
|
| +void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
|
| + if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) {
|
| + bool double_inputs = instr->HasDoubleRegisterInput();
|
| +
|
| + // Flush the stack from TOS down, since Free() will mess with the TOS.
|
| + for (int i = stack_depth_-1; i >= 0; i--) {
|
| + X87Register reg = stack_[i];
|
| + // Skip registers which contain the inputs for the next instruction
|
| + // when flushing the stack
|
| + if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
|
| + continue;
|
| + }
|
| + Free(reg);
|
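| + // Free() moved the old top-of-stack element into slot i; adjust i so
|
| + // that slot is examined again on the next iteration.
|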
| + if (i < stack_depth_-1) i++;
|
| + }
|
| + }
|
| + if (instr->IsReturn()) {
|
| + while (stack_depth_ > 0) {
|
| + __ fstp(0);
|
| + stack_depth_--;
|
| + }
|
| + if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
|
| + }
|
| +}
|
| +
|
| +
|
| +void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
|
| + ASSERT(stack_depth_ <= 1);
|
| + // If this is ever used for new stubs that produce two pairs of doubles
|
| + // joined into two phis, this assert will trigger. That situation is not
|
| + // handled, since the two stacks might have st0 and st1 swapped.
|
| + if (current_block_id + 1 != goto_instr->block_id()) {
|
| + // If we have a value on the x87 stack on leaving a block, it must be a
|
| + // phi input. If the next block we compile is not the join block, we have
|
| + // to discard the stack state.
|
| + stack_depth_ = 0;
|
| + }
|
| +}
|
| +
|
| +
|
| +void LCodeGen::EmitFlushX87ForDeopt() {
|
| + // The deoptimizer does not support x87 registers. But as long as we
|
| + // deopt from a stub it's not a problem, since we will re-materialize the
|
| + // original stub inputs, which can't be double registers.
|
| + ASSERT(info()->IsStub());
|
| + if (FLAG_debug_code && FLAG_enable_slow_asserts) {
|
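| + // VerifyX87StackDepth clobbers EFLAGS, which still hold the deopt
|
| + // condition, so save and restore them around the check.
|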
| + __ pushfd();
|
| + __ VerifyX87StackDepth(x87_stack_.depth());
|
| + __ popfd();
|
| + }
|
| + for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
|
| }
|
|
|
|
|
| @@ -497,9 +683,9 @@ Register LCodeGen::ToRegister(LOperand* op) const {
|
| }
|
|
|
|
|
| -XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
|
| +X87Register LCodeGen::ToX87Register(LOperand* op) const {
|
| ASSERT(op->IsDoubleRegister());
|
| - return ToDoubleRegister(op->index());
|
| + return ToX87Register(op->index());
|
| }
|
|
|
|
|
| @@ -557,7 +743,7 @@ static int ArgumentsOffsetWithoutFrame(int index) {
|
|
|
| Operand LCodeGen::ToOperand(LOperand* op) const {
|
| if (op->IsRegister()) return Operand(ToRegister(op));
|
| - if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
|
| + ASSERT(!op->IsDoubleRegister());
|
| ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
|
| if (NeedsEagerFrame()) {
|
| return Operand(ebp, StackSlotOffset(op->index()));
|
| @@ -694,9 +880,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
|
| } else {
|
| translation->StoreInt32Register(reg);
|
| }
|
| - } else if (op->IsDoubleRegister()) {
|
| - XMMRegister reg = ToDoubleRegister(op);
|
| - translation->StoreDoubleRegister(reg);
|
| } else if (op->IsConstantOperand()) {
|
| HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
|
| int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
|
| @@ -733,12 +916,11 @@ void LCodeGen::CallCode(Handle<Code> code,
|
|
|
| void LCodeGen::CallRuntime(const Runtime::Function* fun,
|
| int argc,
|
| - LInstruction* instr,
|
| - SaveFPRegsMode save_doubles) {
|
| + LInstruction* instr) {
|
| ASSERT(instr != NULL);
|
| ASSERT(instr->HasPointerMap());
|
|
|
| - __ CallRuntime(fun, argc, save_doubles);
|
| + __ CallRuntime(fun, argc);
|
|
|
| RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
|
|
|
| @@ -768,7 +950,7 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
|
| LOperand* context) {
|
| LoadContextFromDeferred(context);
|
|
|
| - __ CallRuntimeSaveDoubles(id);
|
| + __ CallRuntime(id);
|
| RecordSafepointWithRegisters(
|
| instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
|
|
|
| @@ -848,6 +1030,17 @@ void LCodeGen::DeoptimizeIf(Condition cc,
|
| __ popfd();
|
| }
|
|
|
| + // Before instructions that can deopt, we normally flush the x87 stack.
|
| + // But the current instruction may still have inputs or outputs on the
|
| + // stack, so we flush them from the physical stack here to leave it in a
|
| + // consistent state.
|
| + if (x87_stack_.depth() > 0) {
|
| + Label done;
|
| + if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
|
| + EmitFlushX87ForDeopt();
|
| + __ bind(&done);
|
| + }
|
| +
|
| if (info()->ShouldTrapOnDeopt()) {
|
| Label done;
|
| if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
|
| @@ -1708,35 +1901,11 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
|
| int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
|
| ASSERT(instr->result()->IsDoubleRegister());
|
|
|
| - XMMRegister res = ToDoubleRegister(instr->result());
|
| - if (int_val == 0) {
|
| - __ xorps(res, res);
|
| - } else {
|
| - Register temp = ToRegister(instr->temp());
|
| - if (CpuFeatures::IsSupported(SSE4_1)) {
|
| - CpuFeatureScope scope2(masm(), SSE4_1);
|
| - if (lower != 0) {
|
| - __ Move(temp, Immediate(lower));
|
| - __ movd(res, Operand(temp));
|
| - __ Move(temp, Immediate(upper));
|
| - __ pinsrd(res, Operand(temp), 1);
|
| - } else {
|
| - __ xorps(res, res);
|
| - __ Move(temp, Immediate(upper));
|
| - __ pinsrd(res, Operand(temp), 1);
|
| - }
|
| - } else {
|
| - __ Move(temp, Immediate(upper));
|
| - __ movd(res, Operand(temp));
|
| - __ psllq(res, 32);
|
| - if (lower != 0) {
|
| - XMMRegister xmm_scratch = double_scratch0();
|
| - __ Move(temp, Immediate(lower));
|
| - __ movd(xmm_scratch, Operand(temp));
|
| - __ orps(res, xmm_scratch);
|
| - }
|
| - }
|
| - }
|
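| + // There is no x87 instruction to load an arbitrary double immediate:
|
| + // push the two 32-bit halves and load the value through memory.
|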
| + __ push(Immediate(upper));
|
| + __ push(Immediate(lower));
|
| + X87Register reg = ToX87Register(instr->result());
|
| + X87Mov(reg, Operand(esp, 0));
|
| + __ add(Operand(esp), Immediate(kDoubleSize));
|
| }
|
|
|
|
|
| @@ -1934,77 +2103,46 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
|
| }
|
| __ bind(&return_left);
|
| } else {
|
| - ASSERT(instr->hydrogen()->representation().IsDouble());
|
| - Label check_nan_left, check_zero, return_left, return_right;
|
| - Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
|
| - XMMRegister left_reg = ToDoubleRegister(left);
|
| - XMMRegister right_reg = ToDoubleRegister(right);
|
| - __ ucomisd(left_reg, right_reg);
|
| - __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
|
| - __ j(equal, &check_zero, Label::kNear); // left == right.
|
| - __ j(condition, &return_left, Label::kNear);
|
| - __ jmp(&return_right, Label::kNear);
|
| -
|
| - __ bind(&check_zero);
|
| - XMMRegister xmm_scratch = double_scratch0();
|
| - __ xorps(xmm_scratch, xmm_scratch);
|
| - __ ucomisd(left_reg, xmm_scratch);
|
| - __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
|
| - // At this point, both left and right are either 0 or -0.
|
| - if (operation == HMathMinMax::kMathMin) {
|
| - __ orpd(left_reg, right_reg);
|
| - } else {
|
| - // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
|
| - __ addsd(left_reg, right_reg);
|
| - }
|
| - __ jmp(&return_left, Label::kNear);
|
| -
|
| - __ bind(&check_nan_left);
|
| - __ ucomisd(left_reg, left_reg); // NaN check.
|
| - __ j(parity_even, &return_left, Label::kNear); // left == NaN.
|
| - __ bind(&return_right);
|
| - __ movaps(left_reg, right_reg);
|
| -
|
| - __ bind(&return_left);
|
| + // TODO(weiliang) use X87 for double representation.
|
| + UNIMPLEMENTED();
|
| }
|
| }
|
|
|
|
|
| void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
|
| - XMMRegister left = ToDoubleRegister(instr->left());
|
| - XMMRegister right = ToDoubleRegister(instr->right());
|
| - XMMRegister result = ToDoubleRegister(instr->result());
|
| + X87Register left = ToX87Register(instr->left());
|
| + X87Register right = ToX87Register(instr->right());
|
| + X87Register result = ToX87Register(instr->result());
|
| + if (instr->op() != Token::MOD) {
|
| + X87PrepareBinaryOp(left, right, result);
|
| + }
|
| switch (instr->op()) {
|
| case Token::ADD:
|
| - __ addsd(left, right);
|
| + __ fadd_i(1);
|
| break;
|
| case Token::SUB:
|
| - __ subsd(left, right);
|
| + __ fsub_i(1);
|
| break;
|
| case Token::MUL:
|
| - __ mulsd(left, right);
|
| + __ fmul_i(1);
|
| break;
|
| case Token::DIV:
|
| - __ divsd(left, right);
|
| - // Don't delete this mov. It may improve performance on some CPUs,
|
| - // when there is a mulsd depending on the result
|
| - __ movaps(left, left);
|
| + __ fdiv_i(1);
|
| break;
|
| case Token::MOD: {
|
| // Pass two doubles as arguments on the stack.
|
| __ PrepareCallCFunction(4, eax);
|
| - __ movsd(Operand(esp, 0 * kDoubleSize), left);
|
| - __ movsd(Operand(esp, 1 * kDoubleSize), right);
|
| + X87Mov(Operand(esp, 1 * kDoubleSize), right);
|
| + X87Mov(Operand(esp, 0), left);
|
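| + // Both operands are in memory now; drop them from the x87 stack and
|
| + // reserve the slot for the result the callee returns in st(0).
|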
| + X87Free(right);
|
| + ASSERT(left.is(result));
|
| + X87PrepareToWrite(result);
|
| __ CallCFunction(
|
| ExternalReference::mod_two_doubles_operation(isolate()),
|
| 4);
|
|
|
| // Return value is in st(0) on ia32.
|
| - // Store it into the result register.
|
| - __ sub(Operand(esp), Immediate(kDoubleSize));
|
| - __ fstp_d(Operand(esp, 0));
|
| - __ movsd(result, Operand(esp, 0));
|
| - __ add(Operand(esp), Immediate(kDoubleSize));
|
| + X87CommitWrite(result);
|
| break;
|
| }
|
| default:
|
| @@ -2063,12 +2201,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
|
| __ test(reg, Operand(reg));
|
| EmitBranch(instr, not_zero);
|
| } else if (r.IsDouble()) {
|
| - ASSERT(!info()->IsStub());
|
| - XMMRegister reg = ToDoubleRegister(instr->value());
|
| - XMMRegister xmm_scratch = double_scratch0();
|
| - __ xorps(xmm_scratch, xmm_scratch);
|
| - __ ucomisd(reg, xmm_scratch);
|
| - EmitBranch(instr, not_equal);
|
| + UNREACHABLE();
|
| } else {
|
| ASSERT(r.IsTagged());
|
| Register reg = ToRegister(instr->value());
|
| @@ -2085,11 +2218,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
|
| ASSERT(!info()->IsStub());
|
| EmitBranch(instr, no_condition);
|
| } else if (type.IsHeapNumber()) {
|
| - ASSERT(!info()->IsStub());
|
| - XMMRegister xmm_scratch = double_scratch0();
|
| - __ xorps(xmm_scratch, xmm_scratch);
|
| - __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
|
| - EmitBranch(instr, not_equal);
|
| + UNREACHABLE();
|
| } else if (type.IsString()) {
|
| ASSERT(!info()->IsStub());
|
| __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
|
| @@ -2171,9 +2300,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
|
| __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
|
| factory()->heap_number_map());
|
| __ j(not_equal, &not_heap_number, Label::kNear);
|
| - XMMRegister xmm_scratch = double_scratch0();
|
| - __ xorps(xmm_scratch, xmm_scratch);
|
| - __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
|
| + __ fldz();
|
| + __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
|
| + __ FCmp();
|
| __ j(zero, instr->FalseLabel(chunk_));
|
| __ jmp(instr->TrueLabel(chunk_));
|
| __ bind(&not_heap_number);
|
| @@ -2196,6 +2325,10 @@ void LCodeGen::EmitGoto(int block) {
|
| }
|
|
|
|
|
| +void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoGoto(LGoto* instr) {
|
| EmitGoto(instr->block_id());
|
| }
|
| @@ -2249,7 +2382,8 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
|
| EmitGoto(next_block);
|
| } else {
|
| if (instr->is_double()) {
|
| - __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
|
| + X87LoadForUsage(ToX87Register(right), ToX87Register(left));
|
| + __ FCmp();
|
| // Don't base result on EFLAGS when a NaN is involved. Instead
|
| // jump to the false block.
|
| __ j(parity_even, instr->FalseLabel(chunk_));
|
| @@ -2293,12 +2427,21 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
|
| return;
|
| }
|
|
|
| - XMMRegister input_reg = ToDoubleRegister(instr->object());
|
| - __ ucomisd(input_reg, input_reg);
|
| - EmitFalseBranch(instr, parity_odd);
|
| + // Put the value on top of the x87 stack.
|
| + X87Register src = ToX87Register(instr->object());
|
| + X87LoadForUsage(src);
|
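| + // The hole is a NaN, and a NaN is the only value that compares
|
| + // unordered with itself. FCmp pops both operands, so the value is
|
| + // duplicated twice to keep the original for the bit check below.
|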
| + __ fld(0);
|
| + __ fld(0);
|
| + __ FCmp();
|
| + Label ok;
|
| + __ j(parity_even, &ok, Label::kNear);
|
| + __ fstp(0);
|
| + EmitFalseBranch(instr, no_condition);
|
| + __ bind(&ok);
|
| +
|
|
|
| __ sub(esp, Immediate(kDoubleSize));
|
| - __ movsd(MemOperand(esp, 0), input_reg);
|
| + __ fstp_d(MemOperand(esp, 0));
|
|
|
| __ add(esp, Immediate(kDoubleSize));
|
| int offset = sizeof(kHoleNanUpper32);
|
| @@ -2310,17 +2453,9 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
|
| void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
|
| Representation rep = instr->hydrogen()->value()->representation();
|
| ASSERT(!rep.IsInteger32());
|
| - Register scratch = ToRegister(instr->temp());
|
|
|
| if (rep.IsDouble()) {
|
| - XMMRegister value = ToDoubleRegister(instr->value());
|
| - XMMRegister xmm_scratch = double_scratch0();
|
| - __ xorps(xmm_scratch, xmm_scratch);
|
| - __ ucomisd(xmm_scratch, value);
|
| - EmitFalseBranch(instr, not_equal);
|
| - __ movmskpd(scratch, value);
|
| - __ test(scratch, Immediate(1));
|
| - EmitBranch(instr, not_zero);
|
| + UNREACHABLE();
|
| } else {
|
| Register value = ToRegister(instr->value());
|
| Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
|
| @@ -2615,8 +2750,9 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
|
| class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
|
| public:
|
| DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
|
| - LInstanceOfKnownGlobal* instr)
|
| - : LDeferredCode(codegen), instr_(instr) { }
|
| + LInstanceOfKnownGlobal* instr,
|
| + const X87Stack& x87_stack)
|
| + : LDeferredCode(codegen, x87_stack), instr_(instr) { }
|
| virtual void Generate() V8_OVERRIDE {
|
| codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
|
| }
|
| @@ -2628,7 +2764,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
|
| };
|
|
|
| DeferredInstanceOfKnownGlobal* deferred;
|
| - deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
|
| + deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);
|
|
|
| Label done, false_result;
|
| Register object = ToRegister(instr->value());
|
| @@ -2777,7 +2913,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
|
| __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| __ CallRuntime(Runtime::kTraceExit, 1);
|
| }
|
| - if (info()->saves_caller_doubles()) RestoreCallerDoubles();
|
| if (dynamic_frame_alignment_) {
|
| // Fetch the state of the dynamic frame alignment.
|
| __ mov(edx, Operand(ebp,
|
| @@ -2892,7 +3027,6 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
|
| offset,
|
| value,
|
| temp,
|
| - kSaveFPRegs,
|
| EMIT_REMEMBERED_SET,
|
| check_needed);
|
| }
|
| @@ -2917,8 +3051,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
|
|
|
| Register object = ToRegister(instr->object());
|
| if (instr->hydrogen()->representation().IsDouble()) {
|
| - XMMRegister result = ToDoubleRegister(instr->result());
|
| - __ movsd(result, FieldOperand(object, offset));
|
| + X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
|
| return;
|
| }
|
|
|
| @@ -3044,12 +3177,10 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
|
| instr->base_offset()));
|
| if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
|
| elements_kind == FLOAT32_ELEMENTS) {
|
| - XMMRegister result(ToDoubleRegister(instr->result()));
|
| - __ movss(result, operand);
|
| - __ cvtss2sd(result, result);
|
| + X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
|
| } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
|
| elements_kind == FLOAT64_ELEMENTS) {
|
| - __ movsd(ToDoubleRegister(instr->result()), operand);
|
| + X87Mov(ToX87Register(instr->result()), operand);
|
| } else {
|
| Register result(ToRegister(instr->result()));
|
| switch (elements_kind) {
|
| @@ -3119,8 +3250,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
|
| instr->hydrogen()->key()->representation(),
|
| FAST_DOUBLE_ELEMENTS,
|
| instr->base_offset());
|
| - XMMRegister result = ToDoubleRegister(instr->result());
|
| - __ movsd(result, double_load_operand);
|
| + X87Mov(ToX87Register(instr->result()), double_load_operand);
|
| }
|
|
|
|
|
| @@ -3537,8 +3667,9 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
|
| class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
|
| public:
|
| DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
|
| - LMathAbs* instr)
|
| - : LDeferredCode(codegen), instr_(instr) { }
|
| + LMathAbs* instr,
|
| + const X87Stack& x87_stack)
|
| + : LDeferredCode(codegen, x87_stack), instr_(instr) { }
|
| virtual void Generate() V8_OVERRIDE {
|
| codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
|
| }
|
| @@ -3551,16 +3682,12 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
|
| Representation r = instr->hydrogen()->value()->representation();
|
|
|
| if (r.IsDouble()) {
|
| - XMMRegister scratch = double_scratch0();
|
| - XMMRegister input_reg = ToDoubleRegister(instr->value());
|
| - __ xorps(scratch, scratch);
|
| - __ subsd(scratch, input_reg);
|
| - __ andps(input_reg, scratch);
|
| + UNIMPLEMENTED();
|
| } else if (r.IsSmiOrInteger32()) {
|
| EmitIntegerMathAbs(instr);
|
| } else { // Tagged case.
|
| DeferredMathAbsTaggedHeapNumber* deferred =
|
| - new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
|
| + new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
|
| Register input_reg = ToRegister(instr->value());
|
| // Smi check.
|
| __ JumpIfNotSmi(input_reg, deferred->entry());
|
| @@ -3571,259 +3698,42 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
|
|
|
|
|
| void LCodeGen::DoMathFloor(LMathFloor* instr) {
|
| - XMMRegister xmm_scratch = double_scratch0();
|
| - Register output_reg = ToRegister(instr->result());
|
| - XMMRegister input_reg = ToDoubleRegister(instr->value());
|
| -
|
| - if (CpuFeatures::IsSupported(SSE4_1)) {
|
| - CpuFeatureScope scope(masm(), SSE4_1);
|
| - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| - // Deoptimize on negative zero.
|
| - Label non_zero;
|
| - __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
|
| - __ ucomisd(input_reg, xmm_scratch);
|
| - __ j(not_equal, &non_zero, Label::kNear);
|
| - __ movmskpd(output_reg, input_reg);
|
| - __ test(output_reg, Immediate(1));
|
| - DeoptimizeIf(not_zero, instr->environment());
|
| - __ bind(&non_zero);
|
| - }
|
| - __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
|
| - __ cvttsd2si(output_reg, Operand(xmm_scratch));
|
| - // Overflow is signalled with minint.
|
| - __ cmp(output_reg, 0x1);
|
| - DeoptimizeIf(overflow, instr->environment());
|
| - } else {
|
| - Label negative_sign, done;
|
| - // Deoptimize on unordered.
|
| - __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
|
| - __ ucomisd(input_reg, xmm_scratch);
|
| - DeoptimizeIf(parity_even, instr->environment());
|
| - __ j(below, &negative_sign, Label::kNear);
|
| -
|
| - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| - // Check for negative zero.
|
| - Label positive_sign;
|
| - __ j(above, &positive_sign, Label::kNear);
|
| - __ movmskpd(output_reg, input_reg);
|
| - __ test(output_reg, Immediate(1));
|
| - DeoptimizeIf(not_zero, instr->environment());
|
| - __ Move(output_reg, Immediate(0));
|
| - __ jmp(&done, Label::kNear);
|
| - __ bind(&positive_sign);
|
| - }
|
| -
|
| - // Use truncating instruction (OK because input is positive).
|
| - __ cvttsd2si(output_reg, Operand(input_reg));
|
| - // Overflow is signalled with minint.
|
| - __ cmp(output_reg, 0x1);
|
| - DeoptimizeIf(overflow, instr->environment());
|
| - __ jmp(&done, Label::kNear);
|
| -
|
| - // Non-zero negative reaches here.
|
| - __ bind(&negative_sign);
|
| - // Truncate, then compare and compensate.
|
| - __ cvttsd2si(output_reg, Operand(input_reg));
|
| - __ Cvtsi2sd(xmm_scratch, output_reg);
|
| - __ ucomisd(input_reg, xmm_scratch);
|
| - __ j(equal, &done, Label::kNear);
|
| - __ sub(output_reg, Immediate(1));
|
| - DeoptimizeIf(overflow, instr->environment());
|
| -
|
| - __ bind(&done);
|
| - }
|
| + UNIMPLEMENTED();
|
| }
|
|
|
|
|
| void LCodeGen::DoMathRound(LMathRound* instr) {
|
| - Register output_reg = ToRegister(instr->result());
|
| - XMMRegister input_reg = ToDoubleRegister(instr->value());
|
| - XMMRegister xmm_scratch = double_scratch0();
|
| - XMMRegister input_temp = ToDoubleRegister(instr->temp());
|
| - ExternalReference one_half = ExternalReference::address_of_one_half();
|
| - ExternalReference minus_one_half =
|
| - ExternalReference::address_of_minus_one_half();
|
| -
|
| - Label done, round_to_zero, below_one_half, do_not_compensate;
|
| - Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
|
| -
|
| - __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
|
| - __ ucomisd(xmm_scratch, input_reg);
|
| - __ j(above, &below_one_half, Label::kNear);
|
| -
|
| - // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
|
| - __ addsd(xmm_scratch, input_reg);
|
| - __ cvttsd2si(output_reg, Operand(xmm_scratch));
|
| - // Overflow is signalled with minint.
|
| - __ cmp(output_reg, 0x1);
|
| - __ RecordComment("D2I conversion overflow");
|
| - DeoptimizeIf(overflow, instr->environment());
|
| - __ jmp(&done, dist);
|
| -
|
| - __ bind(&below_one_half);
|
| - __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
|
| - __ ucomisd(xmm_scratch, input_reg);
|
| - __ j(below_equal, &round_to_zero, Label::kNear);
|
| -
|
| - // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
|
| - // compare and compensate.
|
| - __ movaps(input_temp, input_reg); // Do not alter input_reg.
|
| - __ subsd(input_temp, xmm_scratch);
|
| - __ cvttsd2si(output_reg, Operand(input_temp));
|
| - // Catch minint due to overflow, and to prevent overflow when compensating.
|
| - __ cmp(output_reg, 0x1);
|
| - __ RecordComment("D2I conversion overflow");
|
| - DeoptimizeIf(overflow, instr->environment());
|
| -
|
| - __ Cvtsi2sd(xmm_scratch, output_reg);
|
| - __ ucomisd(xmm_scratch, input_temp);
|
| - __ j(equal, &done, dist);
|
| - __ sub(output_reg, Immediate(1));
|
| - // No overflow because we already ruled out minint.
|
| - __ jmp(&done, dist);
|
| -
|
| - __ bind(&round_to_zero);
|
| - // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
|
| - // we can ignore the difference between a result of -0 and +0.
|
| - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| - // If the sign is positive, we return +0.
|
| - __ movmskpd(output_reg, input_reg);
|
| - __ test(output_reg, Immediate(1));
|
| - __ RecordComment("Minus zero");
|
| - DeoptimizeIf(not_zero, instr->environment());
|
| - }
|
| - __ Move(output_reg, Immediate(0));
|
| - __ bind(&done);
|
| + UNIMPLEMENTED();
|
| }
|
|
|
|
|
| void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
|
| - XMMRegister input_reg = ToDoubleRegister(instr->value());
|
| - ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
|
| - __ sqrtsd(input_reg, input_reg);
|
| + UNIMPLEMENTED();
|
| }
|
|
|
|
|
| void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
|
| - XMMRegister xmm_scratch = double_scratch0();
|
| - XMMRegister input_reg = ToDoubleRegister(instr->value());
|
| - Register scratch = ToRegister(instr->temp());
|
| - ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
|
| -
|
| - // Note that according to ECMA-262 15.8.2.13:
|
| - // Math.pow(-Infinity, 0.5) == Infinity
|
| - // Math.sqrt(-Infinity) == NaN
|
| - Label done, sqrt;
|
| - // Check base for -Infinity. According to IEEE-754, single-precision
|
| - // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
|
| - __ mov(scratch, 0xFF800000);
|
| - __ movd(xmm_scratch, scratch);
|
| - __ cvtss2sd(xmm_scratch, xmm_scratch);
|
| - __ ucomisd(input_reg, xmm_scratch);
|
| - // Comparing -Infinity with NaN results in "unordered", which sets the
|
| - // zero flag as if both were equal. However, it also sets the carry flag.
|
| - __ j(not_equal, &sqrt, Label::kNear);
|
| - __ j(carry, &sqrt, Label::kNear);
|
| - // If input is -Infinity, return Infinity.
|
| - __ xorps(input_reg, input_reg);
|
| - __ subsd(input_reg, xmm_scratch);
|
| - __ jmp(&done, Label::kNear);
|
| -
|
| - // Square root.
|
| - __ bind(&sqrt);
|
| - __ xorps(xmm_scratch, xmm_scratch);
|
| - __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
|
| - __ sqrtsd(input_reg, input_reg);
|
| - __ bind(&done);
|
| + UNIMPLEMENTED();
|
| }
|
|
|
|
|
| void LCodeGen::DoPower(LPower* instr) {
|
| - Representation exponent_type = instr->hydrogen()->right()->representation();
|
| - // Having marked this as a call, we can use any registers.
|
| - // Just make sure that the input/output registers are the expected ones.
|
| - ASSERT(!instr->right()->IsDoubleRegister() ||
|
| - ToDoubleRegister(instr->right()).is(xmm1));
|
| - ASSERT(!instr->right()->IsRegister() ||
|
| - ToRegister(instr->right()).is(eax));
|
| - ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
|
| - ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
|
| -
|
| - if (exponent_type.IsSmi()) {
|
| - MathPowStub stub(isolate(), MathPowStub::TAGGED);
|
| - __ CallStub(&stub);
|
| - } else if (exponent_type.IsTagged()) {
|
| - Label no_deopt;
|
| - __ JumpIfSmi(eax, &no_deopt);
|
| - __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
|
| - DeoptimizeIf(not_equal, instr->environment());
|
| - __ bind(&no_deopt);
|
| - MathPowStub stub(isolate(), MathPowStub::TAGGED);
|
| - __ CallStub(&stub);
|
| - } else if (exponent_type.IsInteger32()) {
|
| - MathPowStub stub(isolate(), MathPowStub::INTEGER);
|
| - __ CallStub(&stub);
|
| - } else {
|
| - ASSERT(exponent_type.IsDouble());
|
| - MathPowStub stub(isolate(), MathPowStub::DOUBLE);
|
| - __ CallStub(&stub);
|
| - }
|
| + UNIMPLEMENTED();
|
| }
|
|
|
|
|
| void LCodeGen::DoMathLog(LMathLog* instr) {
|
| - ASSERT(instr->value()->Equals(instr->result()));
|
| - XMMRegister input_reg = ToDoubleRegister(instr->value());
|
| - XMMRegister xmm_scratch = double_scratch0();
|
| - Label positive, done, zero;
|
| - __ xorps(xmm_scratch, xmm_scratch);
|
| - __ ucomisd(input_reg, xmm_scratch);
|
| - __ j(above, &positive, Label::kNear);
|
| - __ j(not_carry, &zero, Label::kNear);
|
| - ExternalReference nan =
|
| - ExternalReference::address_of_canonical_non_hole_nan();
|
| - __ movsd(input_reg, Operand::StaticVariable(nan));
|
| - __ jmp(&done, Label::kNear);
|
| - __ bind(&zero);
|
| - ExternalReference ninf =
|
| - ExternalReference::address_of_negative_infinity();
|
| - __ movsd(input_reg, Operand::StaticVariable(ninf));
|
| - __ jmp(&done, Label::kNear);
|
| - __ bind(&positive);
|
| - __ fldln2();
|
| - __ sub(Operand(esp), Immediate(kDoubleSize));
|
| - __ movsd(Operand(esp, 0), input_reg);
|
| - __ fld_d(Operand(esp, 0));
|
| - __ fyl2x();
|
| - __ fstp_d(Operand(esp, 0));
|
| - __ movsd(input_reg, Operand(esp, 0));
|
| - __ add(Operand(esp), Immediate(kDoubleSize));
|
| - __ bind(&done);
|
| + UNIMPLEMENTED();
|
| }
|
|
|
|
|
| void LCodeGen::DoMathClz32(LMathClz32* instr) {
|
| - Register input = ToRegister(instr->value());
|
| - Register result = ToRegister(instr->result());
|
| - Label not_zero_input;
|
| - __ bsr(result, input);
|
| -
|
| - __ j(not_zero, &not_zero_input);
|
| - __ Move(result, Immediate(63)); // 63^31 == 32
|
| -
|
| - __ bind(&not_zero_input);
|
| - __ xor_(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
|
| + UNIMPLEMENTED();
|
| }
|
|
|
|
|
| void LCodeGen::DoMathExp(LMathExp* instr) {
|
| - XMMRegister input = ToDoubleRegister(instr->value());
|
| - XMMRegister result = ToDoubleRegister(instr->result());
|
| - XMMRegister temp0 = double_scratch0();
|
| - Register temp1 = ToRegister(instr->temp1());
|
| - Register temp2 = ToRegister(instr->temp2());
|
| -
|
| - MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
|
| + UNIMPLEMENTED();
|
| }
|
|
|
|
|
| @@ -3920,7 +3830,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
|
|
|
| void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
|
| ASSERT(ToRegister(instr->context()).is(esi));
|
| - CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
|
| + CallRuntime(instr->function(), instr->arity(), instr);
|
| }
|
|
|
|
|
| @@ -3995,8 +3905,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
|
| ASSERT(access.IsInobject());
|
| ASSERT(!instr->hydrogen()->has_transition());
|
| ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
|
| - XMMRegister value = ToDoubleRegister(instr->value());
|
| - __ movsd(FieldOperand(object, offset), value);
|
| + X87Register value = ToX87Register(instr->value());
|
| + X87Mov(FieldOperand(object, offset), value);
|
| return;
|
| }
|
|
|
| @@ -4015,7 +3925,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
|
| HeapObject::kMapOffset,
|
| temp_map,
|
| temp,
|
| - kSaveFPRegs,
|
| OMIT_REMEMBERED_SET,
|
| OMIT_SMI_CHECK);
|
| }
|
| @@ -4056,7 +3965,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
|
| offset,
|
| value,
|
| temp,
|
| - kSaveFPRegs,
|
| EMIT_REMEMBERED_SET,
|
| check_needed);
|
| }
|
| @@ -4115,12 +4023,11 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
|
| instr->base_offset()));
|
| if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
|
| elements_kind == FLOAT32_ELEMENTS) {
|
| - XMMRegister xmm_scratch = double_scratch0();
|
| - __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
|
| - __ movss(operand, xmm_scratch);
|
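| + // The value to store is at st(0). Duplicate it first, since fstp_s
|
| + // pops while narrowing to single precision and the value must stay
|
| + // on the x87 stack.
|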
| + __ fld(0);
|
| + __ fstp_s(operand);
|
| } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
|
| elements_kind == FLOAT64_ELEMENTS) {
|
| - __ movsd(operand, ToDoubleRegister(instr->value()));
|
| + X87Mov(operand, ToX87Register(instr->value()));
|
| } else {
|
| Register value = ToRegister(instr->value());
|
| switch (elements_kind) {
|
| @@ -4173,19 +4080,50 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
|
| FAST_DOUBLE_ELEMENTS,
|
| instr->base_offset());
|
|
|
| - XMMRegister value = ToDoubleRegister(instr->value());
|
| -
|
| - if (instr->NeedsCanonicalization()) {
|
| - Label have_value;
|
| -
|
| - __ ucomisd(value, value);
|
| - __ j(parity_odd, &have_value, Label::kNear); // NaN.
|
| + // Storing the hole does not involve the FPU, so handle that case first.
|
| + if (instr->hydrogen()->IsConstantHoleStore()) {
|
| + // This means we should store the (double) hole. No floating point
|
| + // registers required.
|
| + double nan_double = FixedDoubleArray::hole_nan_as_double();
|
| + uint64_t int_val = BitCast<uint64_t, double>(nan_double);
|
| + int32_t lower = static_cast<int32_t>(int_val);
|
| + int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
|
| +
|
| + __ mov(double_store_operand, Immediate(lower));
|
| + Operand double_store_operand2 = BuildFastArrayOperand(
|
| + instr->elements(),
|
| + instr->key(),
|
| + instr->hydrogen()->key()->representation(),
|
| + FAST_DOUBLE_ELEMENTS,
|
| + instr->base_offset() + kPointerSize);
|
| + __ mov(double_store_operand2, Immediate(upper));
|
| + } else {
|
| + Label no_special_nan_handling;
|
| + X87Register value = ToX87Register(instr->value());
|
| + X87Fxch(value);
|
| +
|
| + if (instr->NeedsCanonicalization()) {
|
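| + // Detect NaN by comparing the value with itself; FCmp pops both
|
| + // copies. The hole NaN is stored unchanged, while any other NaN is
|
| + // replaced below by the canonical NaN.
|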
| + __ fld(0);
|
| + __ fld(0);
|
| + __ FCmp();
|
| +
|
| + __ j(parity_odd, &no_special_nan_handling, Label::kNear);
|
| + __ sub(esp, Immediate(kDoubleSize));
|
| + __ fst_d(MemOperand(esp, 0));
|
| + __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
|
| + Immediate(kHoleNanUpper32));
|
| + __ add(esp, Immediate(kDoubleSize));
|
| + Label canonicalize;
|
| + __ j(not_equal, &canonicalize, Label::kNear);
|
| + __ jmp(&no_special_nan_handling, Label::kNear);
|
| + __ bind(&canonicalize);
|
| + __ fstp(0);
|
| + __ fld_d(Operand::StaticVariable(canonical_nan_reference));
|
| + }
|
|
|
| - __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
|
| - __ bind(&have_value);
|
| + __ bind(&no_special_nan_handling);
|
| + __ fst_d(double_store_operand);
|
| }
|
| -
|
| - __ movsd(double_store_operand, value);
|
| }
|
|
|
|
|
| @@ -4225,7 +4163,6 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
|
| __ RecordWrite(elements,
|
| key,
|
| value,
|
| - kSaveFPRegs,
|
| EMIT_REMEMBERED_SET,
|
| check_needed);
|
| }
|
| @@ -4289,8 +4226,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
|
| // Write barrier.
|
| ASSERT_NE(instr->temp(), NULL);
|
| __ RecordWriteForMap(object_reg, to_map, new_map_reg,
|
| - ToRegister(instr->temp()),
|
| - kDontSaveFPRegs);
|
| + ToRegister(instr->temp()));
|
| } else {
|
| ASSERT(ToRegister(instr->context()).is(esi));
|
| ASSERT(object_reg.is(eax));
|
| @@ -4319,8 +4255,9 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
|
| class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
|
| public:
|
| DeferredStringCharCodeAt(LCodeGen* codegen,
|
| - LStringCharCodeAt* instr)
|
| - : LDeferredCode(codegen), instr_(instr) { }
|
| + LStringCharCodeAt* instr,
|
| + const X87Stack& x87_stack)
|
| + : LDeferredCode(codegen, x87_stack), instr_(instr) { }
|
| virtual void Generate() V8_OVERRIDE {
|
| codegen()->DoDeferredStringCharCodeAt(instr_);
|
| }
|
| @@ -4330,7 +4267,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
|
| };
|
|
|
| DeferredStringCharCodeAt* deferred =
|
| - new(zone()) DeferredStringCharCodeAt(this, instr);
|
| + new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
|
|
|
| StringCharLoadGenerator::Generate(masm(),
|
| factory(),
|
| @@ -4377,8 +4314,9 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
|
| class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
|
| public:
|
| DeferredStringCharFromCode(LCodeGen* codegen,
|
| - LStringCharFromCode* instr)
|
| - : LDeferredCode(codegen), instr_(instr) { }
|
| + LStringCharFromCode* instr,
|
| + const X87Stack& x87_stack)
|
| + : LDeferredCode(codegen, x87_stack), instr_(instr) { }
|
| virtual void Generate() V8_OVERRIDE {
|
| codegen()->DoDeferredStringCharFromCode(instr_);
|
| }
|
| @@ -4388,7 +4326,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
|
| };
|
|
|
| DeferredStringCharFromCode* deferred =
|
| - new(zone()) DeferredStringCharFromCode(this, instr);
|
| + new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);
|
|
|
| ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
|
| Register char_code = ToRegister(instr->char_code());
|
| @@ -4440,17 +4378,24 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
|
| LOperand* output = instr->result();
|
| ASSERT(input->IsRegister() || input->IsStackSlot());
|
| ASSERT(output->IsDoubleRegister());
|
| - __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
|
| + if (input->IsRegister()) {
|
| + Register input_reg = ToRegister(input);
|
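| + // fild only accepts memory operands, so bounce the register value
|
| + // through the stack.
|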
| + __ push(input_reg);
|
| + X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
|
| + __ pop(input_reg);
|
| + } else {
|
| + X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
|
| + }
|
| }
|
|
|
|
|
| void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
|
| LOperand* input = instr->value();
|
| LOperand* output = instr->result();
|
| - LOperand* temp = instr->temp();
|
| - __ LoadUint32(ToDoubleRegister(output),
|
| - ToRegister(input),
|
| - ToDoubleRegister(temp));
|
| + X87Register res = ToX87Register(output);
|
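| + // LoadUint32NoSSE2 leaves the converted value on the FPU stack, so
|
| + // bracket it to keep the virtual x87 stack in sync.
|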
| + X87PrepareToWrite(res);
|
| + __ LoadUint32NoSSE2(ToRegister(input));
|
| + X87CommitWrite(res);
|
| }
|
|
|
|
|
| @@ -4458,11 +4403,12 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
|
| class DeferredNumberTagI V8_FINAL : public LDeferredCode {
|
| public:
|
| DeferredNumberTagI(LCodeGen* codegen,
|
| - LNumberTagI* instr)
|
| - : LDeferredCode(codegen), instr_(instr) { }
|
| + LNumberTagI* instr,
|
| + const X87Stack& x87_stack)
|
| + : LDeferredCode(codegen, x87_stack), instr_(instr) { }
|
| virtual void Generate() V8_OVERRIDE {
|
| codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
|
| - NULL, SIGNED_INT32);
|
| + SIGNED_INT32);
|
| }
|
| virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
|
| private:
|
| @@ -4474,7 +4420,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
|
| Register reg = ToRegister(input);
|
|
|
| DeferredNumberTagI* deferred =
|
| - new(zone()) DeferredNumberTagI(this, instr);
|
| + new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
|
| __ SmiTag(reg);
|
| __ j(overflow, deferred->entry());
|
| __ bind(deferred->exit());
|
| @@ -4484,11 +4430,13 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
|
| void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
|
| class DeferredNumberTagU V8_FINAL : public LDeferredCode {
|
| public:
|
| - DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
|
| - : LDeferredCode(codegen), instr_(instr) { }
|
| + DeferredNumberTagU(LCodeGen* codegen,
|
| + LNumberTagU* instr,
|
| + const X87Stack& x87_stack)
|
| + : LDeferredCode(codegen, x87_stack), instr_(instr) { }
|
| virtual void Generate() V8_OVERRIDE {
|
| - codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
|
| - instr_->temp2(), UNSIGNED_INT32);
|
| + codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
|
| + UNSIGNED_INT32);
|
| }
|
| virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
|
| private:
|
| @@ -4500,7 +4448,7 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
|
| Register reg = ToRegister(input);
|
|
|
| DeferredNumberTagU* deferred =
|
| - new(zone()) DeferredNumberTagU(this, instr);
|
| + new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
|
| __ cmp(reg, Immediate(Smi::kMaxValue));
|
| __ j(above, deferred->entry());
|
| __ SmiTag(reg);
|
| @@ -4510,13 +4458,11 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
|
|
|
| void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
|
| LOperand* value,
|
| - LOperand* temp1,
|
| - LOperand* temp2,
|
| + LOperand* temp,
|
| IntegerSignedness signedness) {
|
| Label done, slow;
|
| Register reg = ToRegister(value);
|
| - Register tmp = ToRegister(temp1);
|
| - XMMRegister xmm_scratch = double_scratch0();
|
| + Register tmp = ToRegister(temp);
|
|
|
| if (signedness == SIGNED_INT32) {
|
| // There was overflow, so bits 30 and 31 of the original integer
|
| @@ -4524,9 +4470,17 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
|
| // the value in there. If that fails, call the runtime system.
|
| __ SmiUntag(reg);
|
| __ xor_(reg, 0x80000000);
|
| - __ Cvtsi2sd(xmm_scratch, Operand(reg));
|
| + __ push(reg);
|
| + __ fild_s(Operand(esp, 0));
|
| + __ pop(reg);
|
| } else {
|
| - __ LoadUint32(xmm_scratch, reg, ToDoubleRegister(temp2));
|
| + // There's no fild variant for unsigned values, so zero-extend to a 64-bit
|
| + // int manually.
|
| + __ push(Immediate(0));
|
| + __ push(reg);
|
| + __ fild_d(Operand(esp, 0));
|
| + __ pop(reg);
|
| + __ pop(reg);
|
| }
|
|
|
| if (FLAG_inline_new) {
|
| @@ -4551,24 +4505,24 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
|
| // The corresponding HChange instructions are added in a phase that does
|
| // not have easy access to the local context.
|
| __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| - __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
|
| + __ CallRuntime(Runtime::kHiddenAllocateHeapNumber);
|
| RecordSafepointWithRegisters(
|
| instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
|
| __ StoreToSafepointRegisterSlot(reg, eax);
|
| }
|
|
|
| - // Done. Put the value in xmm_scratch into the value of the allocated heap
|
| - // number.
|
| __ bind(&done);
|
| - __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
|
| + __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
|
| }
|
|
|
|
|
| void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
|
| class DeferredNumberTagD V8_FINAL : public LDeferredCode {
|
| public:
|
| - DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
|
| - : LDeferredCode(codegen), instr_(instr) { }
|
| + DeferredNumberTagD(LCodeGen* codegen,
|
| + LNumberTagD* instr,
|
| + const X87Stack& x87_stack)
|
| + : LDeferredCode(codegen, x87_stack), instr_(instr) { }
|
| virtual void Generate() V8_OVERRIDE {
|
| codegen()->DoDeferredNumberTagD(instr_);
|
| }
|
| @@ -4579,8 +4533,12 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
|
|
|
| Register reg = ToRegister(instr->result());
|
|
|
| + // Put the value on top of the x87 stack.
|
| + X87Register src = ToX87Register(instr->value());
|
| + X87LoadForUsage(src);
|
| +
|
| DeferredNumberTagD* deferred =
|
| - new(zone()) DeferredNumberTagD(this, instr);
|
| + new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
|
| if (FLAG_inline_new) {
|
| Register tmp = ToRegister(instr->temp());
|
| __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
|
| @@ -4588,8 +4546,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
|
| __ jmp(deferred->entry());
|
| }
|
| __ bind(deferred->exit());
|
| - XMMRegister input_reg = ToDoubleRegister(instr->value());
|
| - __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
|
| + __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
|
| }
|
|
|
|
|
| @@ -4607,7 +4564,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
|
| // The corresponding HChange instructions are added in a phase that does
|
| // not have easy access to the local context.
|
| __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| - __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
|
| + __ CallRuntime(Runtime::kHiddenAllocateHeapNumber);
|
| RecordSafepointWithRegisters(
|
| instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
|
| __ StoreToSafepointRegisterSlot(reg, eax);
|
| @@ -4644,15 +4601,16 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
|
| }
|
|
|
|
|
| -void LCodeGen::EmitNumberUntagD(Register input_reg,
|
| - Register temp_reg,
|
| - XMMRegister result_reg,
|
| - bool can_convert_undefined_to_nan,
|
| - bool deoptimize_on_minus_zero,
|
| - LEnvironment* env,
|
| - NumberUntagDMode mode) {
|
| - Label convert, load_smi, done;
|
| +void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
|
| + Register temp_reg,
|
| + X87Register res_reg,
|
| + bool can_convert_undefined_to_nan,
|
| + bool deoptimize_on_minus_zero,
|
| + LEnvironment* env,
|
| + NumberUntagDMode mode) {
|
| + Label load_smi, done;
|
|
|
| + X87PrepareToWrite(res_reg);
|
| if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
|
| // Smi check.
|
| __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
|
| @@ -4660,49 +4618,56 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
|
| // Heap number map check.
|
| __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
|
| factory()->heap_number_map());
|
| - if (can_convert_undefined_to_nan) {
|
| - __ j(not_equal, &convert, Label::kNear);
|
| - } else {
|
| + if (!can_convert_undefined_to_nan) {
|
| DeoptimizeIf(not_equal, env);
|
| - }
|
| -
|
| - // Heap number to XMM conversion.
|
| - __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
|
| -
|
| - if (deoptimize_on_minus_zero) {
|
| - XMMRegister xmm_scratch = double_scratch0();
|
| - __ xorps(xmm_scratch, xmm_scratch);
|
| - __ ucomisd(result_reg, xmm_scratch);
|
| - __ j(not_zero, &done, Label::kNear);
|
| - __ movmskpd(temp_reg, result_reg);
|
| - __ test_b(temp_reg, 1);
|
| - DeoptimizeIf(not_zero, env);
|
| - }
|
| - __ jmp(&done, Label::kNear);
|
| -
|
| - if (can_convert_undefined_to_nan) {
|
| - __ bind(&convert);
|
| + } else {
|
| + Label heap_number, convert;
|
| + __ j(equal, &heap_number, Label::kNear);
|
|
|
| - // Convert undefined (and hole) to NaN.
|
| + // Convert undefined (or hole) to NaN.
|
| __ cmp(input_reg, factory()->undefined_value());
|
| DeoptimizeIf(not_equal, env);
|
|
|
| + __ bind(&convert);
|
| ExternalReference nan =
|
| ExternalReference::address_of_canonical_non_hole_nan();
|
| - __ movsd(result_reg, Operand::StaticVariable(nan));
|
| + __ fld_d(Operand::StaticVariable(nan));
|
| __ jmp(&done, Label::kNear);
|
| +
|
| + __ bind(&heap_number);
|
| }
|
| + // Heap number to x87 conversion.
|
| + __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
|
| + if (deoptimize_on_minus_zero) {
|
| + __ fldz();
|
| + __ FCmp();
|
| + __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
|
| + __ j(not_zero, &done, Label::kNear);
|
| +
|
| + // Use general purpose registers to check if we have -0.0
|
| + __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
|
| + __ test(temp_reg, Immediate(HeapNumber::kSignMask));
|
| + __ j(zero, &done, Label::kNear);
|
| +
|
| + // Pop FPU stack before deoptimizing.
|
| + __ fstp(0);
|
| + DeoptimizeIf(not_zero, env);
|
| + }
|
| + __ jmp(&done, Label::kNear);
|
| } else {
|
| ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
|
| }
|
|
|
| __ bind(&load_smi);
|
| - // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the
|
| + // Smi to x87 conversion. Clobbering a temp is faster than re-tagging the
|
| // input register since we avoid dependencies.
|
| __ mov(temp_reg, input_reg);
|
| __ SmiUntag(temp_reg); // Untag smi before converting to float.
|
| - __ Cvtsi2sd(result_reg, Operand(temp_reg));
|
| + __ push(temp_reg);
|
| + __ fild_s(Operand(esp, 0));
|
| + __ add(esp, Immediate(kPointerSize));
|
| __ bind(&done);
|
| + X87CommitWrite(res_reg);
|
| }
|
|
|
|
|
| @@ -4744,10 +4709,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
|
| __ Move(input_reg, Immediate(0));
|
| } else {
|
| Label bailout;
|
| - XMMRegister scratch = (instr->temp() != NULL)
|
| - ? ToDoubleRegister(instr->temp())
|
| - : no_xmm_reg;
|
| - __ TaggedToI(input_reg, input_reg, scratch,
|
| + __ TaggedToI(input_reg, input_reg,
|
| instr->hydrogen()->GetMinusZeroMode(), &bailout);
|
| __ jmp(done);
|
| __ bind(&bailout);
|
| @@ -4759,8 +4721,10 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
|
| void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
|
| class DeferredTaggedToI V8_FINAL : public LDeferredCode {
|
| public:
|
| - DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
|
| - : LDeferredCode(codegen), instr_(instr) { }
|
| + DeferredTaggedToI(LCodeGen* codegen,
|
| + LTaggedToI* instr,
|
| + const X87Stack& x87_stack)
|
| + : LDeferredCode(codegen, x87_stack), instr_(instr) { }
|
| virtual void Generate() V8_OVERRIDE {
|
| codegen()->DoDeferredTaggedToI(instr_, done());
|
| }
|
| @@ -4778,7 +4742,7 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
|
| __ SmiUntag(input_reg);
|
| } else {
|
| DeferredTaggedToI* deferred =
|
| - new(zone()) DeferredTaggedToI(this, instr);
|
| + new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
|
| // Optimistically untag the input.
|
| // If the input is a HeapObject, SmiUntag will set the carry flag.
|
| STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
|
| @@ -4808,14 +4772,13 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
|
| NumberUntagDMode mode = value->representation().IsSmi()
|
| ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
|
|
|
| - XMMRegister result_reg = ToDoubleRegister(result);
|
| - EmitNumberUntagD(input_reg,
|
| - temp_reg,
|
| - result_reg,
|
| - instr->hydrogen()->can_convert_undefined_to_nan(),
|
| - deoptimize_on_minus_zero,
|
| - instr->environment(),
|
| - mode);
|
| + EmitNumberUntagDNoSSE2(input_reg,
|
| + temp_reg,
|
| + ToX87Register(result),
|
| + instr->hydrogen()->can_convert_undefined_to_nan(),
|
| + deoptimize_on_minus_zero,
|
| + instr->environment(),
|
| + mode);
|
| }
|
|
|
|
|
| @@ -4827,14 +4790,15 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
|
| Register result_reg = ToRegister(result);
|
|
|
| if (instr->truncating()) {
|
| - XMMRegister input_reg = ToDoubleRegister(input);
|
| - __ TruncateDoubleToI(result_reg, input_reg);
|
| + X87Register input_reg = ToX87Register(input);
|
| + X87Fxch(input_reg);
|
| + __ TruncateX87TOSToI(result_reg);
|
| } else {
|
| Label bailout, done;
|
| - XMMRegister input_reg = ToDoubleRegister(input);
|
| - XMMRegister xmm_scratch = double_scratch0();
|
| - __ DoubleToI(result_reg, input_reg, xmm_scratch,
|
| - instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
|
| + X87Register input_reg = ToX87Register(input);
|
| + X87Fxch(input_reg);
|
| + __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
|
| + &bailout, Label::kNear);
|
| __ jmp(&done, Label::kNear);
|
| __ bind(&bailout);
|
| DeoptimizeIf(no_condition, instr->environment());
|
| @@ -4851,10 +4815,10 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
|
| Register result_reg = ToRegister(result);
|
|
|
| Label bailout, done;
|
| - XMMRegister input_reg = ToDoubleRegister(input);
|
| - XMMRegister xmm_scratch = double_scratch0();
|
| - __ DoubleToI(result_reg, input_reg, xmm_scratch,
|
| - instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
|
| + X87Register input_reg = ToX87Register(input);
|
| + X87Fxch(input_reg);
|
| + __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
|
| + &bailout, Label::kNear);
|
| __ jmp(&done, Label::kNear);
|
| __ bind(&bailout);
|
| DeoptimizeIf(no_condition, instr->environment());
|
| @@ -4945,7 +4909,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
|
| PushSafepointRegistersScope scope(this);
|
| __ push(object);
|
| __ xor_(esi, esi);
|
| - __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
|
| + __ CallRuntime(Runtime::kTryMigrateInstance);
|
| RecordSafepointWithRegisters(
|
| instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
|
|
|
| @@ -4958,8 +4922,11 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
|
| void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
|
| class DeferredCheckMaps V8_FINAL : public LDeferredCode {
|
| public:
|
| - DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
|
| - : LDeferredCode(codegen), instr_(instr), object_(object) {
|
| + DeferredCheckMaps(LCodeGen* codegen,
|
| + LCheckMaps* instr,
|
| + Register object,
|
| + const X87Stack& x87_stack)
|
| + : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
|
| SetExit(check_maps());
|
| }
|
| virtual void Generate() V8_OVERRIDE {
|
| @@ -4987,7 +4954,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
|
|
|
| DeferredCheckMaps* deferred = NULL;
|
| if (instr->hydrogen()->HasMigrationTarget()) {
|
| - deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
|
| + deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
|
| __ bind(deferred->check_maps());
|
| }
|
|
|
| @@ -5012,10 +4979,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
|
|
|
|
|
| void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
|
| - XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
|
| - XMMRegister xmm_scratch = double_scratch0();
|
| - Register result_reg = ToRegister(instr->result());
|
| - __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
|
| + UNREACHABLE();
|
| }
|
|
|
|
|
| @@ -5026,12 +4990,14 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
|
| }
|
|
|
|
|
| -void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
|
| - ASSERT(instr->unclamped()->Equals(instr->result()));
|
| +void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
|
| Register input_reg = ToRegister(instr->unclamped());
|
| - XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
|
| - XMMRegister xmm_scratch = double_scratch0();
|
| - Label is_smi, done, heap_number;
|
| + Register result_reg = ToRegister(instr->result());
|
| + Register scratch = ToRegister(instr->scratch());
|
| + Register scratch2 = ToRegister(instr->scratch2());
|
| + Register scratch3 = ToRegister(instr->scratch3());
|
| + Label is_smi, done, heap_number, valid_exponent,
|
| + largest_value, zero_result, maybe_nan_or_infinity;
|
|
|
| __ JumpIfSmi(input_reg, &is_smi);
|
|
|
| @@ -5044,65 +5010,125 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
|
| // conversions.
|
| __ cmp(input_reg, factory()->undefined_value());
|
| DeoptimizeIf(not_equal, instr->environment());
|
| - __ mov(input_reg, 0);
|
| - __ jmp(&done, Label::kNear);
|
| + __ jmp(&zero_result, Label::kNear);
|
|
|
| // Heap number
|
| __ bind(&heap_number);
|
| - __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
|
| - __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
|
| +
|
| + // Surprisingly, all of the hand-crafted bit manipulations below are much
|
| + // faster than the x86 FPU built-in instruction, especially since "banker's
|
| + // rounding" would additionally be very expensive.
|
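| + //
|
| + // For reference, a rough sketch of the scalar semantics implemented below
|
| + // (RoundHalfToEven is illustrative, not a real helper):
|
| + //   if (!(d > 0.0)) return 0;      // negatives, -0, and NaN clamp to 0
|
| + //   if (d >= 255.5) return 255;    // large finite values and +Infinity
|
| + //   return RoundHalfToEven(d);     // "banker's rounding" of the rest
|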
| +
|
| + // Get exponent word.
|
| + __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
|
| + __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
|
| +
|
| + // Test for negative values --> clamp to zero
|
| + __ test(scratch, scratch);
|
| + __ j(negative, &zero_result, Label::kNear);
|
| +
|
| + // Get exponent alone in scratch2.
|
| + __ mov(scratch2, scratch);
|
| + __ and_(scratch2, HeapNumber::kExponentMask);
|
| + __ shr(scratch2, HeapNumber::kExponentShift);
|
| + __ j(zero, &zero_result, Label::kNear);
|
| + __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
|
| + __ j(negative, &zero_result, Label::kNear);
|
| +
|
| + const uint32_t non_int8_exponent = 7;
|
| + __ cmp(scratch2, Immediate(non_int8_exponent + 1));
|
| + // If the exponent is too big, check for special values.
|
| + __ j(greater, &maybe_nan_or_infinity, Label::kNear);
|
| +
|
| + __ bind(&valid_exponent);
|
| + // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
|
| + // < 7. The shift bias is the number of bits to shift the mantissa such that,
|
| + // with an exponent of 7, the top-most one ends up in bit 30, allowing
|
| + // detection of the rounding overflow of 255.5 to 256 (bit 31 goes from 0 to
|
| + // 1).
|
| + int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
|
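| + // E.g. with kExponentShift == 20 (IEEE-754 double layout), shift_bias is
|
| + // (30 - 20) - 7 - 1 == 2; scratch2 holds exponent + 1, so the shift amount
|
| + // computed below is exponent + 3.
|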
| + __ lea(result_reg, MemOperand(scratch2, shift_bias));
|
| + // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
|
| + // top bits of the mantissa.
|
| + __ and_(scratch, HeapNumber::kMantissaMask);
|
| + // Put back the implicit 1 of the mantissa
|
| + __ or_(scratch, 1 << HeapNumber::kExponentShift);
|
| + // Shift up to round
|
| + __ shl_cl(scratch);
|
| + // Use "banker's rounding" to spec: If fractional part of number is 0.5, then
|
| + // use the bit in the "ones" place and add it to the "halves" place, which has
|
| + // the effect of rounding to even.
|
| + __ mov(scratch2, scratch);
|
| + const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
|
| + const uint32_t one_bit_shift = one_half_bit_shift + 1;
|
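| + // one_half_bit_shift == 22 and one_bit_shift == 23: the integer part of
|
| + // the result now occupies bits 23..30, bit 22 is the "halves" place and
|
| + // bit 23 the "ones" place (hence the final shift right by 23 below).
|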
| + __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
|
| + __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
|
| + Label no_round;
|
| + __ j(less, &no_round, Label::kNear);
|
| + Label round_up;
|
| + __ mov(scratch2, Immediate(1 << one_half_bit_shift));
|
| + __ j(greater, &round_up, Label::kNear);
|
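| + // Exactly halfway in the top word: any bits set in the low mantissa word
|
| + // (scratch3) mean the value is really above the halfway point; otherwise
|
| + // copy the "ones" bit into the "halves" place so the add rounds to even.
|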
| + __ test(scratch3, scratch3);
|
| + __ j(not_zero, &round_up, Label::kNear);
|
| + __ mov(scratch2, scratch);
|
| + __ and_(scratch2, Immediate(1 << one_bit_shift));
|
| + __ shr(scratch2, 1);
|
| + __ bind(&round_up);
|
| + __ add(scratch, scratch2);
|
| + __ j(overflow, &largest_value, Label::kNear);
|
| + __ bind(&no_round);
|
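| + // Shift the integer part (bits 23..30) down to bits 0..7.
|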
| + __ shr(scratch, 23);
|
| + __ mov(result_reg, scratch);
|
| + __ jmp(&done, Label::kNear);
|
| +
|
| + __ bind(&maybe_nan_or_infinity);
|
| + // Check for NaN/Infinity; all other values map to 255.
|
| + __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
|
| + __ j(not_equal, &largest_value, Label::kNear);
|
| +
|
| + // Check for NaN, which differs from Infinity in that at least one mantissa
|
| + // bit is set.
|
| + __ and_(scratch, HeapNumber::kMantissaMask);
|
| + __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
|
| + __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN
|
| + // Infinity -> Fall through to map to 255.
|
| +
|
| + __ bind(&largest_value);
|
| + __ mov(result_reg, Immediate(255));
|
| + __ jmp(&done, Label::kNear);
|
| +
|
| + __ bind(&zero_result);
|
| + __ xor_(result_reg, result_reg);
|
| __ jmp(&done, Label::kNear);
|
|
|
| // smi
|
| __ bind(&is_smi);
|
| - __ SmiUntag(input_reg);
|
| - __ ClampUint8(input_reg);
|
| + if (!input_reg.is(result_reg)) {
|
| + __ mov(result_reg, input_reg);
|
| + }
|
| + __ SmiUntag(result_reg);
|
| + __ ClampUint8(result_reg);
|
| __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
|
| - XMMRegister value_reg = ToDoubleRegister(instr->value());
|
| - Register result_reg = ToRegister(instr->result());
|
| - if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
|
| - if (CpuFeatures::IsSupported(SSE4_1)) {
|
| - CpuFeatureScope scope2(masm(), SSE4_1);
|
| - __ pextrd(result_reg, value_reg, 1);
|
| - } else {
|
| - XMMRegister xmm_scratch = double_scratch0();
|
| - __ pshufd(xmm_scratch, value_reg, 1);
|
| - __ movd(result_reg, xmm_scratch);
|
| - }
|
| - } else {
|
| - __ movd(result_reg, value_reg);
|
| - }
|
| + UNREACHABLE();
|
| }
|
|
|
|
|
| void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
|
| - Register hi_reg = ToRegister(instr->hi());
|
| - Register lo_reg = ToRegister(instr->lo());
|
| - XMMRegister result_reg = ToDoubleRegister(instr->result());
|
| -
|
| - if (CpuFeatures::IsSupported(SSE4_1)) {
|
| - CpuFeatureScope scope2(masm(), SSE4_1);
|
| - __ movd(result_reg, lo_reg);
|
| - __ pinsrd(result_reg, hi_reg, 1);
|
| - } else {
|
| - XMMRegister xmm_scratch = double_scratch0();
|
| - __ movd(result_reg, hi_reg);
|
| - __ psllq(result_reg, 32);
|
| - __ movd(xmm_scratch, lo_reg);
|
| - __ orps(result_reg, xmm_scratch);
|
| - }
|
| + UNREACHABLE();
|
| }
|
|
|
|
|
| void LCodeGen::DoAllocate(LAllocate* instr) {
|
| class DeferredAllocate V8_FINAL : public LDeferredCode {
|
| public:
|
| - DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
|
| - : LDeferredCode(codegen), instr_(instr) { }
|
| + DeferredAllocate(LCodeGen* codegen,
|
| + LAllocate* instr,
|
| + const X87Stack& x87_stack)
|
| + : LDeferredCode(codegen, x87_stack), instr_(instr) { }
|
| virtual void Generate() V8_OVERRIDE {
|
| codegen()->DoDeferredAllocate(instr_);
|
| }
|
| @@ -5111,7 +5137,8 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
|
| LAllocate* instr_;
|
| };
|
|
|
| - DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
|
| + DeferredAllocate* deferred =
|
| + new(zone()) DeferredAllocate(this, instr, x87_stack_);
|
|
|
| Register result = ToRegister(instr->result());
|
| Register temp = ToRegister(instr->temp());
|
| @@ -5464,7 +5491,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
|
| void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
|
| PushSafepointRegistersScope scope(this);
|
| __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| - __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
|
| + __ CallRuntime(Runtime::kHiddenStackGuard);
|
| RecordSafepointWithLazyDeopt(
|
| instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
|
| ASSERT(instr->HasEnvironment());
|
| @@ -5476,8 +5503,10 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
|
| void LCodeGen::DoStackCheck(LStackCheck* instr) {
|
| class DeferredStackCheck V8_FINAL : public LDeferredCode {
|
| public:
|
| - DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
|
| - : LDeferredCode(codegen), instr_(instr) { }
|
| + DeferredStackCheck(LCodeGen* codegen,
|
| + LStackCheck* instr,
|
| + const X87Stack& x87_stack)
|
| + : LDeferredCode(codegen, x87_stack), instr_(instr) { }
|
| virtual void Generate() V8_OVERRIDE {
|
| codegen()->DoDeferredStackCheck(instr_);
|
| }
|
| @@ -5508,7 +5537,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
|
| ASSERT(instr->hydrogen()->is_backwards_branch());
|
| // Perform stack overflow check if this goto needs it before jumping.
|
| DeferredStackCheck* deferred_stack_check =
|
| - new(zone()) DeferredStackCheck(this, instr);
|
| + new(zone()) DeferredStackCheck(this, instr, x87_stack_);
|
| ExternalReference stack_limit =
|
| ExternalReference::address_of_stack_limit(isolate());
|
| __ cmp(esp, Operand::StaticVariable(stack_limit));
|
| @@ -5609,7 +5638,7 @@ void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
|
| __ push(object);
|
| __ push(index);
|
| __ xor_(esi, esi);
|
| - __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
|
| + __ CallRuntime(Runtime::kLoadMutableDouble);
|
| RecordSafepointWithRegisters(
|
| instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
|
| __ StoreToSafepointRegisterSlot(object, eax);
|
| @@ -5622,8 +5651,9 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
|
| DeferredLoadMutableDouble(LCodeGen* codegen,
|
| LLoadFieldByIndex* instr,
|
| Register object,
|
| - Register index)
|
| - : LDeferredCode(codegen),
|
| + Register index,
|
| + const X87Stack& x87_stack)
|
| + : LDeferredCode(codegen, x87_stack),
|
| instr_(instr),
|
| object_(object),
|
| index_(index) {
|
| @@ -5643,7 +5673,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
|
|
|
| DeferredLoadMutableDouble* deferred;
|
| deferred = new(zone()) DeferredLoadMutableDouble(
|
| - this, instr, object, index);
|
| + this, instr, object, index, x87_stack_);
|
|
|
| Label out_of_object, done;
|
| __ test(index, Immediate(Smi::FromInt(1)));
|
| @@ -5676,4 +5706,4 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
|
|
|
| } } // namespace v8::internal
|
|
|
| -#endif // V8_TARGET_ARCH_IA32
|
| +#endif // V8_TARGET_ARCH_X87
|
|
|