| Index: src/x64/lithium-codegen-x64.cc
|
| ===================================================================
|
| --- src/x64/lithium-codegen-x64.cc (revision 15486)
|
| +++ src/x64/lithium-codegen-x64.cc (working copy)
|
| @@ -65,7 +65,11 @@
|
|
|
|
|
| #define __ masm()->
|
| +#define __k __
|
| +#define __q __
|
| +#define __n __
|
|
|
| +
|
| bool LCodeGen::GenerateCode() {
|
| LPhase phase("Z_Code generation", chunk());
|
| ASSERT(is_unused());
|
| @@ -141,8 +145,13 @@
|
| Label ok;
|
| __ testq(rcx, rcx);
|
| __ j(zero, &ok, Label::kNear);
|
| +#ifndef V8_TARGET_ARCH_X32
|
| // +1 for return address.
|
| int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
|
| +#else
|
| + int receiver_offset = 1 * kHWRegSize +
|
| + scope()->num_parameters() * kPointerSize;
|
| +#endif
|
| __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
|
| __ movq(Operand(rsp, receiver_offset), kScratchRegister);
|
| __ bind(&ok);
|
| @@ -171,7 +180,7 @@
|
| __ subq(rsp, Immediate(slots * kPointerSize));
|
| __ push(rax);
|
| __ Set(rax, slots);
|
| - __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64);
|
| + __n movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64);
|
| Label loop;
|
| __ bind(&loop);
|
| __ movq(MemOperand(rsp, rax, times_pointer_size, 0),
|
| @@ -1131,11 +1140,11 @@
|
| int32_t power = WhichPowerOf2(divisor_abs);
|
| if (divisor < 0) {
|
| __ movsxlq(result, dividend);
|
| - __ neg(result);
|
| + __k neg(result);
|
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| DeoptimizeIf(zero, instr->environment());
|
| }
|
| - __ sar(result, Immediate(power));
|
| + __k sar(result, Immediate(power));
|
| } else {
|
| if (!result.is(dividend)) {
|
| __ movl(result, dividend);
|
| @@ -1164,15 +1173,15 @@
|
| __ movsxlq(reg1, dividend);
|
| if (divisor < 0 &&
|
| instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| - __ neg(reg1);
|
| + __k neg(reg1);
|
| DeoptimizeIf(zero, instr->environment());
|
| }
|
| - __ movq(reg2, multiplier, RelocInfo::NONE64);
|
| + __n movq(reg2, multiplier, RelocInfo::NONE64);
|
| // Result just fit in r64, because it's int32 * uint32.
|
| - __ imul(reg2, reg1);
|
| + __k imul(reg2, reg1);
|
|
|
| - __ addq(reg2, Immediate(1 << 30));
|
| - __ sar(reg2, Immediate(shift));
|
| + __k addq(reg2, Immediate(1 << 30));
|
| + __k sar(reg2, Immediate(shift));
|
| }
|
| }
|
|
|
| @@ -1536,7 +1545,7 @@
|
| } else {
|
| Register tmp = ToRegister(instr->temp());
|
| __ Set(tmp, int_val);
|
| - __ movq(res, tmp);
|
| + __k movq(res, tmp);
|
| }
|
| }
|
|
|
| @@ -1624,7 +1633,7 @@
|
| __ bind(&runtime);
|
| __ PrepareCallCFunction(2);
|
| __ movq(arg_reg_1, object);
|
| - __ movq(arg_reg_2, index, RelocInfo::NONE64);
|
| + __n movq(arg_reg_2, index, RelocInfo::NONE64);
|
| __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
|
| __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
|
| __ bind(&done);
|
| @@ -2449,7 +2458,12 @@
|
| __ push(ToRegister(instr->value()));
|
| __ PushHeapObject(instr->function());
|
|
|
| +#ifndef V8_TARGET_ARCH_X32
|
| static const int kAdditionalDelta = 10;
|
| +#else
|
| + // Actual size for X32.
|
| + static const int kAdditionalDelta = 16;
|
| +#endif
|
| int delta =
|
| masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
|
| ASSERT(delta >= 0);
|
| @@ -2542,7 +2556,7 @@
|
| // The argument count parameter is a smi
|
| __ SmiToInteger32(reg, reg);
|
| Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
|
| - __ pop(return_addr_reg);
|
| + __k pop(return_addr_reg);
|
| __ shl(reg, Immediate(kPointerSizeLog2));
|
| __ addq(rsp, reg);
|
| __ jmp(return_addr_reg);
|
| @@ -2865,7 +2879,12 @@
|
| int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
|
| int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
|
| int index = (const_length - const_index) + 1;
|
| +#ifndef V8_TARGET_ARCH_X32
|
| __ movq(result, Operand(arguments, index * kPointerSize));
|
| +#else
|
| + __ movl(result, Operand(arguments, index * kPointerSize +
|
| + 2 * kHWRegSize - 2 * kPointerSize));
|
| +#endif
|
| } else {
|
| Register length = ToRegister(instr->length());
|
| // There are two words between the frame pointer and the last argument.
|
| @@ -2875,8 +2894,15 @@
|
| } else {
|
| __ subl(length, ToOperand(instr->index()));
|
| }
|
| +#ifndef V8_TARGET_ARCH_X32
|
| __ movq(result,
|
| Operand(arguments, length, times_pointer_size, kPointerSize));
|
| +#else
|
| + // The saved PC and FP slots are each kHWRegSize wide.
|
| + __ movl(result,
|
| + Operand(arguments, length, times_pointer_size,
|
| + 2 * kHWRegSize - 1 *kPointerSize));
|
| +#endif
|
| }
|
| }
|
|
|
| @@ -2896,7 +2922,7 @@
|
| } else if (instr->hydrogen()->IsDehoisted()) {
|
| // Sign extend key because it could be a 32 bit negative value
|
| // and the dehoisted address computation happens in 64 bits
|
| - __ movsxlq(key_reg, key_reg);
|
| + __k movsxlq(key_reg, key_reg);
|
| }
|
| }
|
| Operand operand(BuildFastArrayOperand(
|
| @@ -2969,7 +2995,7 @@
|
| } else if (instr->hydrogen()->IsDehoisted()) {
|
| // Sign extend key because it could be a 32 bit negative value
|
| // and the dehoisted address computation happens in 64 bits
|
| - __ movsxlq(key_reg, key_reg);
|
| + __k movsxlq(key_reg, key_reg);
|
| }
|
| }
|
|
|
| @@ -3011,7 +3037,7 @@
|
| } else if (instr->hydrogen()->IsDehoisted()) {
|
| // Sign extend key because it could be a 32 bit negative value
|
| // and the dehoisted address computation happens in 64 bits
|
| - __ movsxlq(key_reg, key_reg);
|
| + __k movsxlq(key_reg, key_reg);
|
| }
|
| }
|
|
|
| @@ -3086,7 +3112,7 @@
|
| Register result = ToRegister(instr->result());
|
|
|
| if (instr->hydrogen()->from_inlined()) {
|
| - __ lea(result, Operand(rsp, -2 * kPointerSize));
|
| + __q lea(result, Operand(rsp, -2 * kPointerSize));
|
| } else {
|
| // Check for arguments adapter frame.
|
| Label done, adapted;
|
| @@ -3208,7 +3234,12 @@
|
| __ testl(length, length);
|
| __ j(zero, &invoke, Label::kNear);
|
| __ bind(&loop);
|
| +#ifndef V8_TARGET_ARCH_X32
|
| __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
|
| +#else
|
| + __ Push(Operand(elements, length, times_pointer_size,
|
| + 2 * kHWRegSize - 1 * kPointerSize));
|
| +#endif
|
| __ decl(length);
|
| __ j(not_zero, &loop);
|
|
|
| @@ -3311,7 +3342,11 @@
|
| if (function.is_identical_to(info()->closure())) {
|
| __ CallSelf();
|
| } else {
|
| +#ifndef V8_TARGET_ARCH_X32
|
| __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
|
| +#else
|
| + __ Call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
|
| +#endif
|
| }
|
|
|
| // Set up deoptimization.
|
| @@ -3384,10 +3419,10 @@
|
| __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
|
|
|
| __ bind(&allocated);
|
| - __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
|
| - __ shl(tmp2, Immediate(1));
|
| - __ shr(tmp2, Immediate(1));
|
| - __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
|
| + __k movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
|
| + __k shl(tmp2, Immediate(1));
|
| + __k shr(tmp2, Immediate(1));
|
| + __k movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
|
| __ StoreToSafepointRegisterSlot(input_reg, tmp);
|
|
|
| __ bind(&done);
|
| @@ -3453,8 +3488,8 @@
|
| CpuFeatureScope scope(masm(), SSE4_1);
|
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| // Deoptimize if minus zero.
|
| - __ movq(output_reg, input_reg);
|
| - __ subq(output_reg, Immediate(1));
|
| + __k movq(output_reg, input_reg);
|
| + __k subq(output_reg, Immediate(1));
|
| DeoptimizeIf(overflow, instr->environment());
|
| }
|
| __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
|
| @@ -3474,7 +3509,7 @@
|
| Label positive_sign;
|
| __ j(above, &positive_sign, Label::kNear);
|
| __ movmskpd(output_reg, input_reg);
|
| - __ testq(output_reg, Immediate(1));
|
| + __k testq(output_reg, Immediate(1));
|
| DeoptimizeIf(not_zero, instr->environment());
|
| __ Set(output_reg, 0);
|
| __ jmp(&done);
|
| @@ -3511,7 +3546,7 @@
|
| static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
|
|
|
| Label done, round_to_zero, below_one_half, do_not_compensate, restore;
|
| - __ movq(kScratchRegister, one_half, RelocInfo::NONE64);
|
| + __k movq(kScratchRegister, one_half, RelocInfo::NONE64);
|
| __ movq(xmm_scratch, kScratchRegister);
|
| __ ucomisd(xmm_scratch, input_reg);
|
| __ j(above, &below_one_half);
|
| @@ -3526,14 +3561,14 @@
|
| __ jmp(&done);
|
|
|
| __ bind(&below_one_half);
|
| - __ movq(kScratchRegister, minus_one_half, RelocInfo::NONE64);
|
| + __k movq(kScratchRegister, minus_one_half, RelocInfo::NONE64);
|
| __ movq(xmm_scratch, kScratchRegister);
|
| __ ucomisd(xmm_scratch, input_reg);
|
| __ j(below_equal, &round_to_zero);
|
|
|
| // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
|
| // compare and compensate.
|
| - __ movq(kScratchRegister, input_reg); // Back up input_reg.
|
| + __k movq(kScratchRegister, input_reg); // Back up input_reg.
|
| __ subsd(input_reg, xmm_scratch);
|
| __ cvttsd2si(output_reg, input_reg);
|
| // Catch minint due to overflow, and to prevent overflow when compensating.
|
| @@ -3547,15 +3582,15 @@
|
| __ subl(output_reg, Immediate(1));
|
| // No overflow because we already ruled out minint.
|
| __ bind(&restore);
|
| - __ movq(input_reg, kScratchRegister); // Restore input_reg.
|
| + __k movq(input_reg, kScratchRegister); // Restore input_reg.
|
| __ jmp(&done);
|
|
|
| __ bind(&round_to_zero);
|
| // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
|
| // we can ignore the difference between a result of -0 and +0.
|
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| - __ movq(output_reg, input_reg);
|
| - __ testq(output_reg, output_reg);
|
| + __k movq(output_reg, input_reg);
|
| + __k testq(output_reg, output_reg);
|
| __ RecordComment("Minus zero");
|
| DeoptimizeIf(negative, instr->environment());
|
| }
|
| @@ -3666,7 +3701,9 @@
|
| #endif
|
|
|
| static const int kSeedSize = sizeof(uint32_t);
|
| +#ifndef V8_TARGET_ARCH_X32
|
| STATIC_ASSERT(kPointerSize == 2 * kSeedSize);
|
| +#endif
|
|
|
| __ movq(global_object,
|
| FieldOperand(global_object, GlobalObject::kNativeContextOffset));
|
| @@ -4084,7 +4121,7 @@
|
| } else if (instr->hydrogen()->IsDehoisted()) {
|
| // Sign extend key because it could be a 32 bit negative value
|
| // and the dehoisted address computation happens in 64 bits
|
| - __ movsxlq(key_reg, key_reg);
|
| + __k movsxlq(key_reg, key_reg);
|
| }
|
| }
|
| Operand operand(BuildFastArrayOperand(
|
| @@ -4148,7 +4185,7 @@
|
| } else if (instr->hydrogen()->IsDehoisted()) {
|
| // Sign extend key because it could be a 32 bit negative value
|
| // and the dehoisted address computation happens in 64 bits
|
| - __ movsxlq(key_reg, key_reg);
|
| + __k movsxlq(key_reg, key_reg);
|
| }
|
| }
|
|
|
| @@ -4160,7 +4197,7 @@
|
|
|
| __ Set(kScratchRegister, BitCast<uint64_t>(
|
| FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
|
| - __ movq(value, kScratchRegister);
|
| + __k movq(value, kScratchRegister);
|
|
|
| __ bind(&have_value);
|
| }
|
| @@ -4191,7 +4228,7 @@
|
| } else if (instr->hydrogen()->IsDehoisted()) {
|
| // Sign extend key because it could be a 32 bit negative value
|
| // and the dehoisted address computation happens in 64 bits
|
| - __ movsxlq(key_reg, key_reg);
|
| + __k movsxlq(key_reg, key_reg);
|
| }
|
| }
|
|
|
| @@ -4402,7 +4439,7 @@
|
|
|
| __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
|
| __ j(above, deferred->entry());
|
| - __ movsxlq(char_code, char_code);
|
| + __k movsxlq(char_code, char_code);
|
| __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
|
| __ movq(result, FieldOperand(result,
|
| char_code, times_pointer_size,
|
| @@ -4474,14 +4511,79 @@
|
|
|
|
|
| void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
|
| +#ifndef V8_TARGET_ARCH_X32
|
| LOperand* input = instr->value();
|
| ASSERT(input->IsRegister() && input->Equals(instr->result()));
|
| Register reg = ToRegister(input);
|
|
|
| __ Integer32ToSmi(reg, reg);
|
| +#else
|
| + class DeferredNumberTagI: public LDeferredCode {
|
| + public:
|
| + DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
|
| + : LDeferredCode(codegen), instr_(instr) { }
|
| + virtual void Generate() {
|
| + codegen()->DoDeferredNumberTagI(instr_);
|
| + }
|
| + virtual LInstruction* instr() { return instr_; }
|
| + private:
|
| + LNumberTagI* instr_;
|
| + };
|
| +
|
| + LOperand* input = instr->value();
|
| + ASSERT(input->IsRegister() && input->Equals(instr->result()));
|
| + Register reg = ToRegister(input);
|
| +
|
| + DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
|
| + __ Integer32ToSmi(reg, reg);
|
| + __ j(overflow, deferred->entry());
|
| + __ bind(deferred->exit());
|
| +#endif
|
| }
|
|
|
|
|
| +#ifdef V8_TARGET_ARCH_X32
|
| +void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
|
| + Label slow;
|
| + Register reg = ToRegister(instr->value());
|
| + Register tmp = reg.is(rax) ? kScratchRegister : rax;
|
| +
|
| + // Preserve the value of all registers.
|
| + PushSafepointRegistersScope scope(this);
|
| +
|
| + Label done;
|
| + // There was overflow, so bits 30 and 31 of the original integer
|
| + // disagree. Try to allocate a heap number in new space and store
|
| + // the value in there. If that fails, call the runtime system.
|
| + __ SmiToInteger32(reg, reg);
|
| + __ xorl(reg, Immediate(0x80000000));
|
| + __ cvtlsi2sd(xmm0, reg);
|
| +
|
| + if (FLAG_inline_new) {
|
| + __ AllocateHeapNumber(reg, tmp, &slow);
|
| + __ jmp(&done, Label::kNear);
|
| + }
|
| +
|
| + // Slow case: Call the runtime system to do the number allocation.
|
| + __ bind(&slow);
|
| +
|
| + // Put a valid pointer value in the stack slot where the result
|
| + // register is stored, as this register is in the pointer map, but contains an
|
| + // integer value.
|
| + __ StoreToSafepointRegisterSlot(reg, Immediate(0));
|
| + CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
|
| + // Set the pointer to the new heap number in tmp.
|
| + if (!reg.is(rax)) __ movl(reg, rax);
|
| +
|
| + // Heap number allocated. Put the value in xmm0 into the value of the
|
| + // allocated heap number.
|
| + __ bind(&done);
|
| + __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
|
| + __ StoreToSafepointRegisterSlot(reg, reg);
|
| +}
|
| +#endif
|
| +
|
| +
|
| void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
|
| class DeferredNumberTagU: public LDeferredCode {
|
| public:
|
| @@ -4586,7 +4688,7 @@
|
| __ addq(rsp, Immediate(kDoubleSize));
|
| __ Set(kScratchRegister, BitCast<uint64_t>(
|
| FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
|
| - __ movq(input_reg, kScratchRegister);
|
| + __k movq(input_reg, kScratchRegister);
|
| }
|
|
|
| __ bind(&no_special_nan_handling);
|
| @@ -4687,7 +4789,7 @@
|
| __ ucomisd(xmm_scratch, result_reg);
|
| __ j(not_equal, &done, Label::kNear);
|
| __ movmskpd(kScratchRegister, result_reg);
|
| - __ testq(kScratchRegister, Immediate(1));
|
| + __k testq(kScratchRegister, Immediate(1));
|
| DeoptimizeIf(not_zero, env);
|
| }
|
| __ jmp(&done, Label::kNear);
|
| @@ -4725,7 +4827,7 @@
|
| __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
|
| __ cvttsd2siq(input_reg, xmm0);
|
| __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
|
| - __ cmpq(input_reg, kScratchRegister);
|
| + __k cmpq(input_reg, kScratchRegister);
|
| DeoptimizeIf(equal, instr->environment());
|
| } else {
|
| // Deoptimize if we don't have a heap number.
|
| @@ -4814,10 +4916,10 @@
|
| // Performs a truncating conversion of a floating point number as used by
|
| // the JS bitwise operations.
|
| __ cvttsd2siq(result_reg, input_reg);
|
| - __ movq(kScratchRegister,
|
| + __k movq(kScratchRegister,
|
| V8_INT64_C(0x8000000000000000),
|
| RelocInfo::NONE64);
|
| - __ cmpq(result_reg, kScratchRegister);
|
| + __k cmpq(result_reg, kScratchRegister);
|
| DeoptimizeIf(equal, instr->environment());
|
| } else {
|
| __ cvttsd2si(result_reg, input_reg);
|
| @@ -5648,6 +5750,9 @@
|
| }
|
|
|
|
|
| +#undef __n
|
| +#undef __q
|
| +#undef __k
|
| #undef __
|
|
|
| } } // namespace v8::internal
|
|
|