Index: src/x87/code-stubs-x87.cc
diff --git a/src/ia32/code-stubs-ia32.cc b/src/x87/code-stubs-x87.cc
similarity index 90%
copy from src/ia32/code-stubs-ia32.cc
copy to src/x87/code-stubs-x87.cc
index 5547ba25e86294c7838a7885f0374535f4afb76c..2c95ad1a4837115c8e46b81f129961a424a21563 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/x87/code-stubs-x87.cc
@@ -4,7 +4,7 @@
#include "v8.h"
-#if V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_X87
#include "bootstrapper.h"
#include "code-stubs.h"
@@ -64,9 +64,9 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
static Representation representations[] = {
- Representation::Tagged(),
- Representation::Smi(),
- Representation::Tagged() };
+ Representation::Tagged(),
+ Representation::Smi(),
+ Representation::Tagged() };
descriptor->register_param_representations_ = representations;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(
@@ -479,13 +479,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// store the registers in any particular way, but we do have to store and
// restore them.
__ pushad();
- if (save_doubles_ == kSaveFPRegs) {
- __ sub(esp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- __ movsd(Operand(esp, i * kDoubleSize), reg);
- }
- }
const int argument_count = 1;
AllowExternalCallThatCantCauseGC scope(masm);
@@ -495,13 +488,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
- if (save_doubles_ == kSaveFPRegs) {
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- __ movsd(reg, Operand(esp, i * kDoubleSize));
- }
- __ add(esp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
- }
__ popad();
__ ret(0);
}
@@ -526,12 +512,6 @@ class FloatingPointHelper : public AllStatic {
static void CheckFloatOperands(MacroAssembler* masm,
Label* non_float,
Register scratch);
-
- // Test if operands are numbers (smi or HeapNumber objects), and load
- // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
- // either operand is not a number. Operands are in edx and eax.
- // Leaves operands unchanged.
- static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
};
@@ -571,11 +551,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
bool stash_exponent_copy = !input_reg.is(esp);
__ mov(scratch1, mantissa_operand);
- if (CpuFeatures::IsSupported(SSE3)) {
- CpuFeatureScope scope(masm, SSE3);
- // Load x87 register with heap number.
- __ fld_d(mantissa_operand);
- }
__ mov(ecx, exponent_operand);
if (stash_exponent_copy) __ push(ecx);
@@ -587,9 +562,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Result is entirely in lower 32-bits of mantissa
int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
- if (CpuFeatures::IsSupported(SSE3)) {
- __ fstp(0);
- }
__ sub(ecx, Immediate(delta));
__ xor_(result_reg, result_reg);
__ cmp(ecx, Immediate(31));
@@ -598,38 +570,26 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ jmp(&check_negative);
__ bind(&process_64_bits);
- if (CpuFeatures::IsSupported(SSE3)) {
- CpuFeatureScope scope(masm, SSE3);
- if (stash_exponent_copy) {
- // Already a copy of the exponent on the stack, overwrite it.
- STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
- __ sub(esp, Immediate(kDoubleSize / 2));
- } else {
- // Reserve space for 64 bit answer.
- __ sub(esp, Immediate(kDoubleSize)); // Nolint.
- }
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(esp, 0));
- __ mov(result_reg, Operand(esp, 0)); // Load low word of answer as result
- __ add(esp, Immediate(kDoubleSize));
- __ jmp(&done_no_stash);
+ // Result must be extracted from shifted 32-bit mantissa
+ __ sub(ecx, Immediate(delta));
+ __ neg(ecx);
+ if (stash_exponent_copy) {
+ __ mov(result_reg, MemOperand(esp, 0));
} else {
- // Result must be extracted from shifted 32-bit mantissa
- __ sub(ecx, Immediate(delta));
- __ neg(ecx);
- if (stash_exponent_copy) {
- __ mov(result_reg, MemOperand(esp, 0));
- } else {
- __ mov(result_reg, exponent_operand);
- }
- __ and_(result_reg,
- Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32)));
- __ add(result_reg,
- Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32)));
- __ shrd(result_reg, scratch1);
- __ shr_cl(result_reg);
- __ test(ecx, Immediate(32));
- __ cmov(not_equal, scratch1, result_reg);
+ __ mov(result_reg, exponent_operand);
+ }
+ __ and_(result_reg,
+ Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32)));
+ __ add(result_reg,
+ Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32)));
+ __ shrd(result_reg, scratch1);
+ __ shr_cl(result_reg);
+ __ test(ecx, Immediate(32));
+ {
+ Label skip_mov;
+ __ j(equal, &skip_mov, Label::kNear);
+ __ mov(scratch1, result_reg);
+ __ bind(&skip_mov);
}
// If the double was negative, negate the integer result.
@@ -641,7 +601,12 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
} else {
__ cmp(exponent_operand, Immediate(0));
}
- __ cmov(greater, result_reg, scratch1);
+ {
+ Label skip_mov;
+ __ j(less_equal, &skip_mov, Label::kNear);
+ __ mov(result_reg, scratch1);
+ __ bind(&skip_mov);
+ }
// Restore registers
__ bind(&done);
@@ -677,37 +642,6 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
}
-void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
- Label* not_numbers) {
- Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
- // Load operand in edx into xmm0, or branch to not_numbers.
- __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
- Factory* factory = masm->isolate()->factory();
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
- __ j(not_equal, not_numbers); // Argument in edx is not a number.
- __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
- __ bind(&load_eax);
- // Load operand in eax into xmm1, or branch to not_numbers.
- __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map());
- __ j(equal, &load_float_eax, Label::kNear);
- __ jmp(not_numbers); // Argument in eax is not a number.
- __ bind(&load_smi_edx);
- __ SmiUntag(edx); // Untag smi before converting to float.
- __ Cvtsi2sd(xmm0, edx);
- __ SmiTag(edx); // Retag smi for heap number overwriting test.
- __ jmp(&load_eax);
- __ bind(&load_smi_eax);
- __ SmiUntag(eax); // Untag smi before converting to float.
- __ Cvtsi2sd(xmm1, eax);
- __ SmiTag(eax); // Retag smi for heap number overwriting test.
- __ jmp(&done, Label::kNear);
- __ bind(&load_float_eax);
- __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ bind(&done);
-}
-
-
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float,
Register scratch) {
@@ -732,269 +666,8 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
void MathPowStub::Generate(MacroAssembler* masm) {
- Factory* factory = isolate()->factory();
- const Register exponent = eax;
- const Register base = edx;
- const Register scratch = ecx;
- const XMMRegister double_result = xmm3;
- const XMMRegister double_base = xmm2;
- const XMMRegister double_exponent = xmm1;
- const XMMRegister double_scratch = xmm4;
-
- Label call_runtime, done, exponent_not_smi, int_exponent;
-
- // Save 1 in double_result - we need this several times later on.
- __ mov(scratch, Immediate(1));
- __ Cvtsi2sd(double_result, scratch);
-
- if (exponent_type_ == ON_STACK) {
- Label base_is_smi, unpack_exponent;
- // The exponent and base are supplied as arguments on the stack.
- // This can only happen if the stub is called from non-optimized code.
- // Load input parameters from stack.
- __ mov(base, Operand(esp, 2 * kPointerSize));
- __ mov(exponent, Operand(esp, 1 * kPointerSize));
-
- __ JumpIfSmi(base, &base_is_smi, Label::kNear);
- __ cmp(FieldOperand(base, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
-
- __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
- __ jmp(&unpack_exponent, Label::kNear);
-
- __ bind(&base_is_smi);
- __ SmiUntag(base);
- __ Cvtsi2sd(double_base, base);
-
- __ bind(&unpack_exponent);
- __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
- __ SmiUntag(exponent);
- __ jmp(&int_exponent);
-
- __ bind(&exponent_not_smi);
- __ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
- __ movsd(double_exponent,
- FieldOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type_ == TAGGED) {
- __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
- __ SmiUntag(exponent);
- __ jmp(&int_exponent);
-
- __ bind(&exponent_not_smi);
- __ movsd(double_exponent,
- FieldOperand(exponent, HeapNumber::kValueOffset));
- }
-
- if (exponent_type_ != INTEGER) {
- Label fast_power, try_arithmetic_simplification;
- __ DoubleToI(exponent, double_exponent, double_scratch,
- TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification);
- __ jmp(&int_exponent);
-
- __ bind(&try_arithmetic_simplification);
- // Skip to runtime if possibly NaN (indicated by the indefinite integer).
- __ cvttsd2si(exponent, Operand(double_exponent));
- __ cmp(exponent, Immediate(0x1));
- __ j(overflow, &call_runtime);
-
- if (exponent_type_ == ON_STACK) {
- // Detect square root case. Crankshaft detects constant +/-0.5 at
- // compile time and uses DoMathPowHalf instead. We then skip this check
- // for non-constant cases of +/-0.5 as these hardly occur.
- Label continue_sqrt, continue_rsqrt, not_plus_half;
- // Test for 0.5.
- // Load double_scratch with 0.5.
- __ mov(scratch, Immediate(0x3F000000u));
- __ movd(double_scratch, scratch);
- __ cvtss2sd(double_scratch, double_scratch);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_scratch, double_exponent);
- __ j(not_equal, &not_plus_half, Label::kNear);
-
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
- // According to IEEE-754, single-precision -Infinity has the highest
- // 9 bits set and the lowest 23 bits cleared.
- __ mov(scratch, 0xFF800000u);
- __ movd(double_scratch, scratch);
- __ cvtss2sd(double_scratch, double_scratch);
- __ ucomisd(double_base, double_scratch);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &continue_sqrt, Label::kNear);
- __ j(carry, &continue_sqrt, Label::kNear);
-
- // Set result to Infinity in the special case.
- __ xorps(double_result, double_result);
- __ subsd(double_result, double_scratch);
- __ jmp(&done);
-
- __ bind(&continue_sqrt);
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(double_scratch, double_scratch);
- __ addsd(double_scratch, double_base); // Convert -0 to +0.
- __ sqrtsd(double_result, double_scratch);
- __ jmp(&done);
-
- // Test for -0.5.
- __ bind(&not_plus_half);
- // Load double_exponent with -0.5 by substracting 1.
- __ subsd(double_scratch, double_result);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_scratch, double_exponent);
- __ j(not_equal, &fast_power, Label::kNear);
-
- // Calculates reciprocal of square root of base. Check for the special
- // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
- // According to IEEE-754, single-precision -Infinity has the highest
- // 9 bits set and the lowest 23 bits cleared.
- __ mov(scratch, 0xFF800000u);
- __ movd(double_scratch, scratch);
- __ cvtss2sd(double_scratch, double_scratch);
- __ ucomisd(double_base, double_scratch);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &continue_rsqrt, Label::kNear);
- __ j(carry, &continue_rsqrt, Label::kNear);
-
- // Set result to 0 in the special case.
- __ xorps(double_result, double_result);
- __ jmp(&done);
-
- __ bind(&continue_rsqrt);
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(double_exponent, double_exponent);
- __ addsd(double_exponent, double_base); // Convert -0 to +0.
- __ sqrtsd(double_exponent, double_exponent);
- __ divsd(double_result, double_exponent);
- __ jmp(&done);
- }
-
- // Using FPU instructions to calculate power.
- Label fast_power_failed;
- __ bind(&fast_power);
- __ fnclex(); // Clear flags to catch exceptions later.
- // Transfer (B)ase and (E)xponent onto the FPU register stack.
- __ sub(esp, Immediate(kDoubleSize));
- __ movsd(Operand(esp, 0), double_exponent);
- __ fld_d(Operand(esp, 0)); // E
- __ movsd(Operand(esp, 0), double_base);
- __ fld_d(Operand(esp, 0)); // B, E
-
- // Exponent is in st(1) and base is in st(0)
- // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
- // FYL2X calculates st(1) * log2(st(0))
- __ fyl2x(); // X
- __ fld(0); // X, X
- __ frndint(); // rnd(X), X
- __ fsub(1); // rnd(X), X-rnd(X)
- __ fxch(1); // X - rnd(X), rnd(X)
- // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
- __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
- __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
- __ faddp(1); // 2^(X-rnd(X)), rnd(X)
- // FSCALE calculates st(0) * 2^st(1)
- __ fscale(); // 2^X, rnd(X)
- __ fstp(1); // 2^X
- // Bail out to runtime in case of exceptions in the status word.
- __ fnstsw_ax();
- __ test_b(eax, 0x5F); // We check for all but precision exception.
- __ j(not_zero, &fast_power_failed, Label::kNear);
- __ fstp_d(Operand(esp, 0));
- __ movsd(double_result, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- __ jmp(&done);
-
- __ bind(&fast_power_failed);
- __ fninit();
- __ add(esp, Immediate(kDoubleSize));
- __ jmp(&call_runtime);
- }
-
- // Calculate power with integer exponent.
- __ bind(&int_exponent);
- const XMMRegister double_scratch2 = double_exponent;
- __ mov(scratch, exponent); // Back up exponent.
- __ movsd(double_scratch, double_base); // Back up base.
- __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
-
- // Get absolute value of exponent.
- Label no_neg, while_true, while_false;
- __ test(scratch, scratch);
- __ j(positive, &no_neg, Label::kNear);
- __ neg(scratch);
- __ bind(&no_neg);
-
- __ j(zero, &while_false, Label::kNear);
- __ shr(scratch, 1);
- // Above condition means CF==0 && ZF==0. This means that the
- // bit that has been shifted out is 0 and the result is not 0.
- __ j(above, &while_true, Label::kNear);
- __ movsd(double_result, double_scratch);
- __ j(zero, &while_false, Label::kNear);
-
- __ bind(&while_true);
- __ shr(scratch, 1);
- __ mulsd(double_scratch, double_scratch);
- __ j(above, &while_true, Label::kNear);
- __ mulsd(double_result, double_scratch);
- __ j(not_zero, &while_true);
-
- __ bind(&while_false);
- // scratch has the original value of the exponent - if the exponent is
- // negative, return 1/result.
- __ test(exponent, exponent);
- __ j(positive, &done);
- __ divsd(double_scratch2, double_result);
- __ movsd(double_result, double_scratch2);
- // Test whether result is zero. Bail out to check for subnormal result.
- // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ xorps(double_scratch2, double_scratch2);
- __ ucomisd(double_scratch2, double_result); // Result cannot be NaN.
- // double_exponent aliased as double_scratch2 has already been overwritten
- // and may not have contained the exponent value in the first place when the
- // exponent is a smi. We reset it with exponent value before bailing out.
- __ j(not_equal, &done);
- __ Cvtsi2sd(double_exponent, exponent);
-
- // Returning or bailing out.
- Counters* counters = isolate()->counters();
- if (exponent_type_ == ON_STACK) {
- // The arguments are still on the stack.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
-
- // The stub is called from non-optimized code, which expects the result
- // as heap number in exponent.
- __ bind(&done);
- __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
- __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
- __ IncrementCounter(counters->math_pow(), 1);
- __ ret(2 * kPointerSize);
- } else {
- __ bind(&call_runtime);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(4, scratch);
- __ movsd(Operand(esp, 0 * kDoubleSize), double_base);
- __ movsd(Operand(esp, 1 * kDoubleSize), double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 4);
- }
- // Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movsd(double_result, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
-
- __ bind(&done);
- __ IncrementCounter(counters->math_pow(), 1);
- __ ret(0);
- }
+ // No SSE2 support
+ UNREACHABLE();
}
@@ -1768,15 +1441,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ RecordWriteField(ebx,
RegExpImpl::kLastSubjectOffset,
eax,
- edi,
- kDontSaveFPRegs);
+ edi);
__ mov(eax, ecx);
__ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
__ RecordWriteField(ebx,
RegExpImpl::kLastInputOffset,
eax,
- edi,
- kDontSaveFPRegs);
+ edi);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@@ -2046,17 +1717,29 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
Label non_number_comparison;
Label unordered;
__ bind(&generic_heap_number_comparison);
+ FloatingPointHelper::CheckFloatOperands(
+ masm, &non_number_comparison, ebx);
+ FloatingPointHelper::LoadFloatOperand(masm, eax);
+ FloatingPointHelper::LoadFloatOperand(masm, edx);
+ __ FCmp();
- FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
- __ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered, Label::kNear);
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, ecx);
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, ecx);
+ Label below_label, above_label;
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ j(below, &below_label, Label::kNear);
+ __ j(above, &above_label, Label::kNear);
+
+ __ Move(eax, Immediate(0));
+ __ ret(0);
+
+ __ bind(&below_label);
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ __ ret(0);
+
+ __ bind(&above_label);
+ __ mov(eax, Immediate(Smi::FromInt(1)));
__ ret(0);
// If one of the numbers was NaN, then the result is always false.
@@ -2268,8 +1951,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ push(edi);
__ push(ebx);
__ push(edx);
- __ RecordWriteArray(ebx, edi, edx, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ RecordWriteArray(ebx, edi, edx, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ pop(edx);
__ pop(ebx);
__ pop(edi);
@@ -2642,19 +2324,12 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
void CodeStub::GenerateFPStubs(Isolate* isolate) {
- CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
- // Stubs might already be in the snapshot, detect that and don't regenerate,
- // which would lead to code stub initialization state being messed up.
- Code* save_doubles_code;
- if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
- save_doubles_code = *(save_doubles.GetCode());
- }
- isolate->set_fp_stubs_generated(true);
+ // Do nothing.
}
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
- CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+ CEntryStub stub(isolate, 1);
stub.GetCode();
}
@@ -2670,7 +2345,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(save_doubles_ == kSaveFPRegs);
+ __ EnterExitFrame();
// ebx: pointer to C function (C callee-saved)
// ebp: frame pointer (restored after C call)
@@ -2726,7 +2401,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
}
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles_ == kSaveFPRegs);
+ __ LeaveExitFrame();
__ ret(0);
// Handling of exception.
@@ -3797,46 +3472,18 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ JumpIfNotSmi(eax, &miss);
}
- // Load left and right operand.
- Label done, left, left_smi, right_smi;
- __ JumpIfSmi(eax, &right_smi, Label::kNear);
+ // Inlining the double comparison and falling back to the general compare
+ // stub if NaN is involved or SSE2 or CMOV is unsupported.
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
+ __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
+
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined1, Label::kNear);
- __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ jmp(&left, Label::kNear);
- __ bind(&right_smi);
- __ mov(ecx, eax); // Can't clobber eax because we can still jump away.
- __ SmiUntag(ecx);
- __ Cvtsi2sd(xmm1, ecx);
-
- __ bind(&left);
- __ JumpIfSmi(edx, &left_smi, Label::kNear);
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined2, Label::kNear);
- __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
- __ jmp(&done);
- __ bind(&left_smi);
- __ mov(ecx, edx); // Can't clobber edx because we can still jump away.
- __ SmiUntag(ecx);
- __ Cvtsi2sd(xmm0, ecx);
-
- __ bind(&done);
- // Compare operands.
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
-
- // Return a result of -1, 0, or 1, based on EFLAGS.
- // Performing mov, because xor would destroy the flag register.
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, ecx);
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, ecx);
- __ ret(0);
__ bind(&unordered);
__ bind(&generic_stub);
@@ -4326,10 +3973,8 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
- StoreBufferOverflowStub stub(isolate, kDontSaveFPRegs);
+ StoreBufferOverflowStub stub(isolate);
stub.GetCode();
- StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
- stub2.GetCode();
}
@@ -4352,7 +3997,6 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
__ RememberedSetHelper(object_,
address_,
value_,
- save_fp_regs_mode_,
MacroAssembler::kReturnAtEnd);
} else {
__ ret(0);
@@ -4399,7 +4043,6 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
__ RememberedSetHelper(object_,
address_,
value_,
- save_fp_regs_mode_,
MacroAssembler::kReturnAtEnd);
__ bind(&dont_need_remembered_set);
@@ -4416,7 +4059,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ regs_.SaveCallerSaveRegisters(masm);
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
__ mov(Operand(esp, 0 * kPointerSize), regs_.object());
@@ -4429,7 +4072,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
ExternalReference::incremental_marking_record_write_function(isolate()),
argument_count);
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+ regs_.RestoreCallerSaveRegisters(masm);
}
@@ -4463,7 +4106,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
__ RememberedSetHelper(object_,
address_,
value_,
- save_fp_regs_mode_,
MacroAssembler::kReturnAtEnd);
} else {
__ ret(0);
@@ -4511,7 +4153,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
__ RememberedSetHelper(object_,
address_,
value_,
- save_fp_regs_mode_,
MacroAssembler::kReturnAtEnd);
} else {
__ ret(0);
@@ -4582,7 +4223,6 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ mov(Operand(ecx, 0), eax);
// Update the write barrier for the array store.
__ RecordWrite(ebx, ecx, eax,
- kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ ret(0);
@@ -4604,15 +4244,15 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
edx,
ecx,
edi,
- xmm0,
- &slow_elements_from_double);
+ &slow_elements_from_double,
+ false);
__ pop(edx);
__ ret(0);
}
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- CEntryStub ces(isolate(), 1, kSaveFPRegs);
+ CEntryStub ces(isolate(), 1);
__ call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
@@ -5138,4 +4778,4 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_IA32
+#endif // V8_TARGET_ARCH_X87