Index: src/ia32/lithium-codegen-ia32.cc
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index d119111138a2f40eb7036f7321be2102be003429..92b1250458acda624dbdf0facddff1728159bc28 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -340,12 +340,41 @@ void LCodeGen::GenerateOsrPrologue() {
 
   osr_pc_offset_ = masm()->pc_offset();
 
+  // Move state of dynamic frame alignment into edx.
+  __ mov(edx, Immediate(kNoAlignmentPadding));
+
+  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
+    Label do_not_pad, align_loop;
+    // Align ebp + 4 to a multiple of 2 * kPointerSize.
+    __ test(ebp, Immediate(kPointerSize));
+    __ j(zero, &do_not_pad, Label::kNear);
+    __ push(Immediate(0));
+    __ mov(ebx, esp);
+    __ mov(edx, Immediate(kAlignmentPaddingPushed));
+
+    // Move all parts of the frame over one word. The frame consists of:
+    // unoptimized frame slots, alignment state, context, frame pointer, return
+    // address, receiver, and the arguments.
+    __ mov(ecx, Immediate(scope()->num_parameters() +
+           5 + graph()->osr()->UnoptimizedFrameSlots()));
+
+    __ bind(&align_loop);
+    __ mov(eax, Operand(ebx, 1 * kPointerSize));
+    __ mov(Operand(ebx, 0), eax);
+    __ add(Operand(ebx), Immediate(kPointerSize));
+    __ dec(ecx);
+    __ j(not_zero, &align_loop, Label::kNear);
+    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+    __ sub(Operand(ebp), Immediate(kPointerSize));
+    __ bind(&do_not_pad);
+  }
+
   // Save the first local, which is overwritten by the alignment state.
   Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
   __ push(alignment_loc);
 
-  // Set the dynamic frame alignment state to "not aligned".
-  __ mov(alignment_loc, Immediate(kNoAlignmentPadding));
+  // Set the dynamic frame alignment state.
+  __ mov(alignment_loc, edx);
 
   // Adjust the frame size, subsuming the unoptimized frame into the
   // optimized frame.
@@ -1967,9 +1996,10 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
         __ movd(res, Operand(temp));
         __ psllq(res, 32);
         if (lower != 0) {
+          XMMRegister xmm_scratch = double_scratch0();
           __ Set(temp, Immediate(lower));
-          __ movd(xmm0, Operand(temp));
-          __ por(res, xmm0);
+          __ movd(xmm_scratch, Operand(temp));
+          __ por(res, xmm_scratch);
         }
       }
     }
@@ -2178,7 +2208,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
     __ jmp(&return_right, Label::kNear);
 
     __ bind(&check_zero);
-    XMMRegister xmm_scratch = xmm0;
+    XMMRegister xmm_scratch = double_scratch0();
     __ xorps(xmm_scratch, xmm_scratch);
     __ ucomisd(left_reg, xmm_scratch);
     __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
@@ -2338,25 +2368,6 @@ void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
 }
 
 
-void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
-  Representation r = instr->hydrogen()->value()->representation();
-  if (r.IsSmiOrInteger32() || r.IsDouble()) {
-    EmitBranch(instr, no_condition);
-  } else {
-    ASSERT(r.IsTagged());
-    Register reg = ToRegister(instr->value());
-    HType type = instr->hydrogen()->value()->type();
-    if (type.IsTaggedNumber()) {
-      EmitBranch(instr, no_condition);
-    }
-    __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
-    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
-           factory()->heap_number_map());
-    EmitBranch(instr, equal);
-  }
-}
-
-
 void LCodeGen::DoBranch(LBranch* instr) {
   Representation r = instr->hydrogen()->value()->representation();
   if (r.IsSmiOrInteger32()) {
@@ -2367,8 +2378,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
     ASSERT(!info()->IsStub());
     CpuFeatureScope scope(masm(), SSE2);
     XMMRegister reg = ToDoubleRegister(instr->value());
-    __ xorps(xmm0, xmm0);
-    __ ucomisd(reg, xmm0);
+    XMMRegister xmm_scratch = double_scratch0();
+    __ xorps(xmm_scratch, xmm_scratch);
+    __ ucomisd(reg, xmm_scratch);
     EmitBranch(instr, not_equal);
   } else {
     ASSERT(r.IsTagged());
@@ -2388,8 +2400,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
     } else if (type.IsHeapNumber()) {
       ASSERT(!info()->IsStub());
      CpuFeatureScope scope(masm(), SSE2);
-      __ xorps(xmm0, xmm0);
-      __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+      XMMRegister xmm_scratch = double_scratch0();
+      __ xorps(xmm_scratch, xmm_scratch);
+      __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
       EmitBranch(instr, not_equal);
     } else if (type.IsString()) {
       ASSERT(!info()->IsStub());
@@ -2474,8 +2487,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
         __ j(not_equal, &not_heap_number, Label::kNear);
         if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
           CpuFeatureScope scope(masm(), SSE2);
-          __ xorps(xmm0, xmm0);
-          __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+          XMMRegister xmm_scratch = double_scratch0();
+          __ xorps(xmm_scratch, xmm_scratch);
+          __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
         } else {
           __ fldz();
           __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
@@ -3355,6 +3369,12 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
 }
 
 
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+  Register result = ToRegister(instr->result());
+  __ LoadRoot(result, instr->index());
+}
+
+
 void LCodeGen::DoLoadExternalArrayPointer(
     LLoadExternalArrayPointer* instr) {
   Register result = ToRegister(instr->result());
@@ -3909,7 +3929,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
 
   CpuFeatureScope scope(masm(), SSE2);
   if (r.IsDouble()) {
-    XMMRegister scratch = xmm0;
+    XMMRegister scratch = double_scratch0();
     XMMRegister input_reg = ToDoubleRegister(instr->value());
     __ xorps(scratch, scratch);
     __ subsd(scratch, input_reg);
@@ -3930,7 +3950,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
 
 void LCodeGen::DoMathFloor(LMathFloor* instr) {
   CpuFeatureScope scope(masm(), SSE2);
-  XMMRegister xmm_scratch = xmm0;
+  XMMRegister xmm_scratch = double_scratch0();
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->value());
 
@@ -3998,7 +4018,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
   CpuFeatureScope scope(masm(), SSE2);
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->value());
-  XMMRegister xmm_scratch = xmm0;
+  XMMRegister xmm_scratch = double_scratch0();
   XMMRegister input_temp = ToDoubleRegister(instr->temp());
   ExternalReference one_half = ExternalReference::address_of_one_half();
   ExternalReference minus_one_half =
@@ -4065,7 +4085,7 @@ void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
 
 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   CpuFeatureScope scope(masm(), SSE2);
-  XMMRegister xmm_scratch = xmm0;
+  XMMRegister xmm_scratch = double_scratch0();
   XMMRegister input_reg = ToDoubleRegister(instr->value());
   Register scratch = ToRegister(instr->temp());
   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
@@ -4184,8 +4204,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
   // by computing:
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
   XMMRegister result = ToDoubleRegister(instr->result());
-  // We use xmm0 as fixed scratch register here.
-  XMMRegister scratch4 = xmm0;
+  XMMRegister scratch4 = double_scratch0();
   __ mov(scratch3, Immediate(0x49800000));  // 1.0 x 2^20 as single.
   __ movd(scratch4, scratch3);
   __ movd(result, random);
@@ -4199,9 +4218,10 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
   CpuFeatureScope scope(masm(), SSE2);
   ASSERT(instr->value()->Equals(instr->result()));
   XMMRegister input_reg = ToDoubleRegister(instr->value());
+  XMMRegister xmm_scratch = double_scratch0();
   Label positive, done, zero;
-  __ xorps(xmm0, xmm0);
-  __ ucomisd(input_reg, xmm0);
+  __ xorps(xmm_scratch, xmm_scratch);
+  __ ucomisd(input_reg, xmm_scratch);
   __ j(above, &positive, Label::kNear);
   __ j(equal, &zero, Label::kNear);
   ExternalReference nan =
@@ -4231,10 +4251,11 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
   CpuFeatureScope scope(masm(), SSE2);
   XMMRegister input = ToDoubleRegister(instr->value());
   XMMRegister result = ToDoubleRegister(instr->result());
+  XMMRegister temp0 = double_scratch0();
   Register temp1 = ToRegister(instr->temp1());
   Register temp2 = ToRegister(instr->temp2());
 
-  MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
+  MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
 }
 
 
@@ -4615,8 +4636,9 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
     if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
       CpuFeatureScope scope(masm(), SSE2);
-      __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
-      __ movss(operand, xmm0);
+      XMMRegister xmm_scratch = double_scratch0();
+      __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
+      __ movss(operand, xmm_scratch);
     } else {
       __ fld(0);
       __ fstp_s(operand);
@@ -5079,6 +5101,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
   Label slow;
   Register reg = ToRegister(value);
   Register tmp = reg.is(eax) ? ecx : eax;
+  XMMRegister xmm_scratch = double_scratch0();
 
   // Preserve the value of all registers.
   PushSafepointRegistersScope scope(this);
@@ -5093,7 +5116,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
     __ xor_(reg, 0x80000000);
    if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope feature_scope(masm(), SSE2);
-      __ Cvtsi2sd(xmm0, Operand(reg));
+      __ Cvtsi2sd(xmm_scratch, Operand(reg));
     } else {
      __ push(reg);
      __ fild_s(Operand(esp, 0));
@@ -5102,7 +5125,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
   } else {
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope feature_scope(masm(), SSE2);
-      __ LoadUint32(xmm0, reg,
+      __ LoadUint32(xmm_scratch, reg,
                     ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
     } else {
       // There's no fild variant for unsigned values, so zero-extend to a 64-bit
@@ -5138,12 +5161,12 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   if (!reg.is(eax)) __ mov(reg, eax);
 
-  // Done. Put the value in xmm0 into the value of the allocated heap
+  // Done. Put the value in xmm_scratch into the value of the allocated heap
   // number.
   __ bind(&done);
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope feature_scope(masm(), SSE2);
-    __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+    __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
   } else {
     __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
   }
@@ -5333,7 +5356,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
   __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
 
   if (deoptimize_on_minus_zero) {
-    XMMRegister xmm_scratch = xmm0;
+    XMMRegister xmm_scratch = double_scratch0();
     __ xorps(xmm_scratch, xmm_scratch);
     __ ucomisd(result_reg, xmm_scratch);
     __ j(not_zero, &done, Label::kNear);
@@ -5499,7 +5522,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
     CpuFeatureScope scope(masm(), SSE2);
     XMMRegister input_reg = ToDoubleRegister(input);
-    __ DoubleToI(result_reg, input_reg, xmm0,
+    XMMRegister xmm_scratch = double_scratch0();
+    __ DoubleToI(result_reg, input_reg, xmm_scratch,
                  instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
   } else {
     X87Register input_reg = ToX87Register(input);
@@ -5526,7 +5550,8 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
   if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
     CpuFeatureScope scope(masm(), SSE2);
    XMMRegister input_reg = ToDoubleRegister(input);
-    __ DoubleToI(result_reg, input_reg, xmm0,
+    XMMRegister xmm_scratch = double_scratch0();
+    __ DoubleToI(result_reg, input_reg, xmm_scratch,
                  instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
   } else {
     X87Register input_reg = ToX87Register(input);
@@ -5606,7 +5631,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
 
 
 void LCodeGen::DoCheckValue(LCheckValue* instr) {
-  Handle<HeapObject> object = instr->hydrogen()->object();
+  Handle<HeapObject> object = instr->hydrogen()->object().handle();
   if (instr->hydrogen()->object_in_new_space()) {
     Register reg = ToRegister(instr->value());
     Handle<Cell> cell = isolate()->factory()->NewCell(object);
@@ -5661,22 +5686,21 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
 
-  SmallMapList* map_set = instr->hydrogen()->map_set();
-
   DeferredCheckMaps* deferred = NULL;
   if (instr->hydrogen()->has_migration_target()) {
     deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
     __ bind(deferred->check_maps());
   }
 
+  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
   Label success;
-  for (int i = 0; i < map_set->length() - 1; i++) {
-    Handle<Map> map = map_set->at(i);
+  for (int i = 0; i < map_set.size() - 1; i++) {
+    Handle<Map> map = map_set.at(i).handle();
     __ CompareMap(reg, map, &success);
     __ j(equal, &success);
   }
 
-  Handle<Map> map = map_set->last();
+  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
   __ CompareMap(reg, map, &success);
   if (instr->hydrogen()->has_migration_target()) {
     __ j(not_equal, deferred->entry());
@@ -5691,8 +5715,9 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
   CpuFeatureScope scope(masm(), SSE2);
   XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+  XMMRegister xmm_scratch = double_scratch0();
   Register result_reg = ToRegister(instr->result());
-  __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
+  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
 }
 
 
@@ -5708,6 +5733,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
 
   ASSERT(instr->unclamped()->Equals(instr->result()));
   Register input_reg = ToRegister(instr->unclamped());
+  XMMRegister xmm_scratch = double_scratch0();
   Label is_smi, done, heap_number;
 
   __ JumpIfSmi(input_reg, &is_smi);
@@ -5726,8 +5752,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
 
   // Heap number
   __ bind(&heap_number);
-  __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
-  __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
+  __ movdbl(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(xmm_scratch, xmm1, input_reg);
   __ jmp(&done, Label::kNear);
 
   // smi
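
Note on the GenerateOsrPrologue hunk above: when padding is needed, the align_loop copies every existing frame word down by one slot, and the number of words to move is scope()->num_parameters() + 5 + graph()->osr()->UnoptimizedFrameSlots(), where the constant 5 covers the alignment state, context, frame pointer, return address, and receiver named in the patch comment. A minimal sketch of that count, assuming exactly the layout described there (OsrWordsToShift is a hypothetical helper, not part of the patch):

// Hypothetical helper, not V8 code: how many words the align_loop shifts,
// assuming the frame layout listed in the patch comment.
int OsrWordsToShift(int num_parameters, int unoptimized_frame_slots) {
  const int kFixedSlots = 5;  // alignment state, context, fp, return address, receiver
  return num_parameters + kFixedSlots + unoptimized_frame_slots;
}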
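
Note on the DoConstantD hunk: the emitted code builds a double constant from its two 32-bit halves, moving the upper half into the result register, shifting it left by 32 with psllq, and OR-ing in the lower half with por only when it is non-zero (now through double_scratch0() rather than clobbering xmm0). The same bit manipulation in plain C++, as a minimal sketch assuming IEEE-754 doubles (CombineHalves is a hypothetical name):

#include <cstdint>
#include <cstring>

// Hypothetical helper, not V8 code: mirrors the psllq-by-32 and conditional por
// sequence that assembles the double's bit pattern from two 32-bit immediates.
double CombineHalves(uint32_t upper, uint32_t lower) {
  uint64_t bits = static_cast<uint64_t>(upper) << 32;
  if (lower != 0) bits |= lower;  // the por is skipped when the low half is zero
  double result;
  std::memcpy(&result, &bits, sizeof(result));  // reinterpret the bit pattern
  return result;
}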
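
Note on the DoCheckMaps hunk: the loop now walks a UniqueSet<Map> by index, comparing the object's map against every entry except the last with an early branch to success, and letting the final compare decide between success and the deoptimization (or map-migration) path. The control flow restated as a plain C++ predicate under assumed types (MatchesAnyMap and the opaque map pointers are hypothetical stand-ins):

#include <vector>

// Hypothetical stand-in, not V8 code: the compare-all-but-last pattern from
// DoCheckMaps, with maps reduced to opaque pointers.
bool MatchesAnyMap(const void* object_map,
                   const std::vector<const void*>& map_set) {
  if (map_set.empty()) return false;
  for (size_t i = 0; i + 1 < map_set.size(); ++i) {
    if (object_map == map_set[i]) return true;  // early "__ j(equal, &success)"
  }
  // Only the last compare can lead to deoptimization / the migration check.
  return object_map == map_set.back();
}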