| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 47 matching lines...) |
| 58 } | 58 } |
| 59 | 59 |
| 60 private: | 60 private: |
| 61 LCodeGen* codegen_; | 61 LCodeGen* codegen_; |
| 62 LPointerMap* pointers_; | 62 LPointerMap* pointers_; |
| 63 Safepoint::DeoptMode deopt_mode_; | 63 Safepoint::DeoptMode deopt_mode_; |
| 64 }; | 64 }; |
| 65 | 65 |
| 66 | 66 |
| 67 #define __ masm()-> | 67 #define __ masm()-> |
| 68 #define __k __ |
| 69 #define __q __ |
| 70 #define __n __ |
| 71 |
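Note on the new `__k` / `__q` / `__n` aliases: on x64 they all expand to the plain `__` macro, so this file's behavior is unchanged; they read as annotations marking emitter calls whose operand width the X32 port treats differently. The interpretation below is an assumption inferred from how the tags are used later in this diff (`__k` on raw 64-bit operations, `__q` on a pointer-sized `lea`, `__n` on `movq` with 64-bit immediates), not something the patch states:

```cpp
// Sketch of the assumed annotation scheme; all identical to __ on x64.
#define __  masm()->
#define __k __  // must remain a 64-bit (quadword) op on X32, e.g. moves
                //   of raw double bits, 64-bit neg/sar/imul
#define __q __  // pointer-width op: 4-byte operands on X32, 8 on x64
#define __n __  // movq reg, imm64 with RelocInfo::NONE64
```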
| 68 | 72 |
| 69 bool LCodeGen::GenerateCode() { | 73 bool LCodeGen::GenerateCode() { |
| 70 LPhase phase("Z_Code generation", chunk()); | 74 LPhase phase("Z_Code generation", chunk()); |
| 71 ASSERT(is_unused()); | 75 ASSERT(is_unused()); |
| 72 status_ = GENERATING; | 76 status_ = GENERATING; |
| 73 | 77 |
| 74 // Open a frame scope to indicate that there is a frame on the stack. The | 78 // Open a frame scope to indicate that there is a frame on the stack. The |
| 75 // MANUAL indicates that the scope shouldn't actually generate code to set up | 79 // MANUAL indicates that the scope shouldn't actually generate code to set up |
| 76 // the frame (that is done in GeneratePrologue). | 80 // the frame (that is done in GeneratePrologue). |
| 77 FrameScope frame_scope(masm_, StackFrame::MANUAL); | 81 FrameScope frame_scope(masm_, StackFrame::MANUAL); |
| (...skipping 56 matching lines...) |
| 134 #endif | 138 #endif |
| 135 | 139 |
| 136 // Strict mode functions need to replace the receiver with undefined | 140 // Strict mode functions need to replace the receiver with undefined |
| 137 // when called as functions (without an explicit receiver | 141 // when called as functions (without an explicit receiver |
| 138 // object). rcx is zero for method calls and non-zero for function | 142 // object). rcx is zero for method calls and non-zero for function |
| 139 // calls. | 143 // calls. |
| 140 if (!info_->is_classic_mode() || info_->is_native()) { | 144 if (!info_->is_classic_mode() || info_->is_native()) { |
| 141 Label ok; | 145 Label ok; |
| 142 __ testq(rcx, rcx); | 146 __ testq(rcx, rcx); |
| 143 __ j(zero, &ok, Label::kNear); | 147 __ j(zero, &ok, Label::kNear); |
| 148 #ifndef V8_TARGET_ARCH_X32 |
| 144 // +1 for return address. | 149 // +1 for return address. |
| 145 int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize; | 150 int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize; |
| 151 #else |
| 152 int receiver_offset = 1 * kHWRegSize + |
| 153 scope()->num_parameters() * kPointerSize; |
| 154 #endif |
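The X32 branch here reflects the ILP32-on-x86-64 layout: `call` pushes an 8-byte return address (`kHWRegSize`) while each tagged parameter slot is only `kPointerSize` (4 bytes), so the two can no longer be counted in the same units. A minimal sketch of the arithmetic, with the slot sizes assumed from the constants' names:

```cpp
// Illustrative only; not V8 code.
constexpr int kPointerSize = 4;  // X32 tagged-pointer slot (8 on x64)
constexpr int kHWRegSize = 8;    // slot pushed by CALL (return address)

int ReceiverOffsetX64(int num_parameters) {
  return (num_parameters + 1) * 8;  // return address counted as one slot
}
int ReceiverOffsetX32(int num_parameters) {
  return 1 * kHWRegSize + num_parameters * kPointerSize;
}
// With two parameters: 24 bytes on x64, but 8 + 2 * 4 = 16 on X32.
```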
| 146 __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex); | 155 __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex); |
| 147 __ movq(Operand(rsp, receiver_offset), kScratchRegister); | 156 __ movq(Operand(rsp, receiver_offset), kScratchRegister); |
| 148 __ bind(&ok); | 157 __ bind(&ok); |
| 149 } | 158 } |
| 150 } | 159 } |
| 151 | 160 |
| 152 info()->set_prologue_offset(masm_->pc_offset()); | 161 info()->set_prologue_offset(masm_->pc_offset()); |
| 153 if (NeedsEagerFrame()) { | 162 if (NeedsEagerFrame()) { |
| 154 ASSERT(!frame_is_built_); | 163 ASSERT(!frame_is_built_); |
| 155 frame_is_built_ = true; | 164 frame_is_built_ = true; |
| 156 __ push(rbp); // Caller's frame pointer. | 165 __ push(rbp); // Caller's frame pointer. |
| 157 __ movq(rbp, rsp); | 166 __ movq(rbp, rsp); |
| 158 __ push(rsi); // Callee's context. | 167 __ push(rsi); // Callee's context. |
| 159 if (info()->IsStub()) { | 168 if (info()->IsStub()) { |
| 160 __ Push(Smi::FromInt(StackFrame::STUB)); | 169 __ Push(Smi::FromInt(StackFrame::STUB)); |
| 161 } else { | 170 } else { |
| 162 __ push(rdi); // Callee's JS function. | 171 __ push(rdi); // Callee's JS function. |
| 163 } | 172 } |
| 164 info()->AddNoFrameRange(0, masm_->pc_offset()); | 173 info()->AddNoFrameRange(0, masm_->pc_offset()); |
| 165 } | 174 } |
| 166 | 175 |
| 167 // Reserve space for the stack slots needed by the code. | 176 // Reserve space for the stack slots needed by the code. |
| 168 int slots = GetStackSlotCount(); | 177 int slots = GetStackSlotCount(); |
| 169 if (slots > 0) { | 178 if (slots > 0) { |
| 170 if (FLAG_debug_code) { | 179 if (FLAG_debug_code) { |
| 171 __ subq(rsp, Immediate(slots * kPointerSize)); | 180 __ subq(rsp, Immediate(slots * kPointerSize)); |
| 172 __ push(rax); | 181 __ push(rax); |
| 173 __ Set(rax, slots); | 182 __ Set(rax, slots); |
| 174 __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64); | 183 __n movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64); |
| 175 Label loop; | 184 Label loop; |
| 176 __ bind(&loop); | 185 __ bind(&loop); |
| 177 __ movq(MemOperand(rsp, rax, times_pointer_size, 0), | 186 __ movq(MemOperand(rsp, rax, times_pointer_size, 0), |
| 178 kScratchRegister); | 187 kScratchRegister); |
| 179 __ decl(rax); | 188 __ decl(rax); |
| 180 __ j(not_zero, &loop); | 189 __ j(not_zero, &loop); |
| 181 __ pop(rax); | 190 __ pop(rax); |
| 182 } else { | 191 } else { |
| 183 __ subq(rsp, Immediate(slots * kPointerSize)); | 192 __ subq(rsp, Immediate(slots * kPointerSize)); |
| 184 #ifdef _MSC_VER | 193 #ifdef _MSC_VER |
| (...skipping 939 matching lines...) |
| 1124 DeoptimizeIf(overflow, instr->environment()); | 1133 DeoptimizeIf(overflow, instr->environment()); |
| 1125 } | 1134 } |
| 1126 return; | 1135 return; |
| 1127 } | 1136 } |
| 1128 | 1137 |
| 1129 uint32_t divisor_abs = abs(divisor); | 1138 uint32_t divisor_abs = abs(divisor); |
| 1130 if (IsPowerOf2(divisor_abs)) { | 1139 if (IsPowerOf2(divisor_abs)) { |
| 1131 int32_t power = WhichPowerOf2(divisor_abs); | 1140 int32_t power = WhichPowerOf2(divisor_abs); |
| 1132 if (divisor < 0) { | 1141 if (divisor < 0) { |
| 1133 __ movsxlq(result, dividend); | 1142 __ movsxlq(result, dividend); |
| 1134 __ neg(result); | 1143 __k neg(result); |
| 1135 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1144 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1136 DeoptimizeIf(zero, instr->environment()); | 1145 DeoptimizeIf(zero, instr->environment()); |
| 1137 } | 1146 } |
| 1138 __ sar(result, Immediate(power)); | 1147 __k sar(result, Immediate(power)); |
| 1139 } else { | 1148 } else { |
| 1140 if (!result.is(dividend)) { | 1149 if (!result.is(dividend)) { |
| 1141 __ movl(result, dividend); | 1150 __ movl(result, dividend); |
| 1142 } | 1151 } |
| 1143 __ sarl(result, Immediate(power)); | 1152 __ sarl(result, Immediate(power)); |
| 1144 } | 1153 } |
| 1145 } else { | 1154 } else { |
| 1146 Register reg1 = ToRegister(instr->temp()); | 1155 Register reg1 = ToRegister(instr->temp()); |
| 1147 Register reg2 = ToRegister(instr->result()); | 1156 Register reg2 = ToRegister(instr->result()); |
| 1148 | 1157 |
| 1149 // Find b such that 2^b < divisor_abs < 2^(b+1). | 1158 // Find b such that 2^b < divisor_abs < 2^(b+1). |
| 1150 unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs); | 1159 unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs); |
| 1151 unsigned shift = 32 + b; // Precision +1 bit (effectively). | 1160 unsigned shift = 32 + b; // Precision +1 bit (effectively). |
| 1152 double multiplier_f = | 1161 double multiplier_f = |
| 1153 static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs; | 1162 static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs; |
| 1154 int64_t multiplier; | 1163 int64_t multiplier; |
| 1155 if (multiplier_f - floor(multiplier_f) < 0.5) { | 1164 if (multiplier_f - floor(multiplier_f) < 0.5) { |
| 1156 multiplier = static_cast<int64_t>(floor(multiplier_f)); | 1165 multiplier = static_cast<int64_t>(floor(multiplier_f)); |
| 1157 } else { | 1166 } else { |
| 1158 multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1; | 1167 multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1; |
| 1159 } | 1168 } |
| 1160 // The multiplier is a uint32. | 1169 // The multiplier is a uint32. |
| 1161 ASSERT(multiplier > 0 && | 1170 ASSERT(multiplier > 0 && |
| 1162 multiplier < (static_cast<int64_t>(1) << 32)); | 1171 multiplier < (static_cast<int64_t>(1) << 32)); |
| 1163 // The multiply is int64, so sign-extend to r64. | 1172 // The multiply is int64, so sign-extend to r64. |
| 1164 __ movsxlq(reg1, dividend); | 1173 __ movsxlq(reg1, dividend); |
| 1165 if (divisor < 0 && | 1174 if (divisor < 0 && |
| 1166 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1175 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1167 __ neg(reg1); | 1176 __k neg(reg1); |
| 1168 DeoptimizeIf(zero, instr->environment()); | 1177 DeoptimizeIf(zero, instr->environment()); |
| 1169 } | 1178 } |
| 1170 __ movq(reg2, multiplier, RelocInfo::NONE64); | 1179 __n movq(reg2, multiplier, RelocInfo::NONE64); |
| 1171 // The result just fits in r64, because it's int32 * uint32. | 1180 // The result just fits in r64, because it's int32 * uint32. |
| 1172 __ imul(reg2, reg1); | 1181 __k imul(reg2, reg1); |
| 1173 | 1182 |
| 1174 __ addq(reg2, Immediate(1 << 30)); | 1183 __k addq(reg2, Immediate(1 << 30)); |
| 1175 __ sar(reg2, Immediate(shift)); | 1184 __k sar(reg2, Immediate(shift)); |
| 1176 } | 1185 } |
| 1177 } | 1186 } |
| 1178 | 1187 |
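The non-power-of-two path above is the standard division-by-invariant-integer trick: with shift = 32 + b and multiplier ≈ 2^shift / divisor_abs rounded to the nearest integer, one 64-bit multiply plus an arithmetic shift replaces an idiv. A minimal standalone check of the derivation for divisor_abs = 5 (my sketch; `__builtin_clz` is a GCC/Clang stand-in for CompilerIntrinsics::CountLeadingZeros):

```cpp
#include <cmath>
#include <cstdint>

int main() {
  uint32_t divisor_abs = 5;
  unsigned b = 31 - __builtin_clz(divisor_abs);  // b = 2: 2^2 < 5 < 2^3
  unsigned shift = 32 + b;                       // 34
  double multiplier_f =
      static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
  // Round to nearest, as the floor/floor+1 branch above does.
  int64_t multiplier = static_cast<int64_t>(std::floor(multiplier_f + 0.5));
  // multiplier == 3435973837, which fits in a uint32.
  int32_t x = 12345;
  int64_t q = (static_cast<int64_t>(x) * multiplier) >> shift;
  return q == x / 5 ? 0 : 1;  // q == 2469; exits 0
}
```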
| 1179 | 1188 |
| 1180 void LCodeGen::DoDivI(LDivI* instr) { | 1189 void LCodeGen::DoDivI(LDivI* instr) { |
| 1181 if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) { | 1190 if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) { |
| 1182 Register dividend = ToRegister(instr->left()); | 1191 Register dividend = ToRegister(instr->left()); |
| 1183 int32_t divisor = | 1192 int32_t divisor = |
| 1184 HConstant::cast(instr->hydrogen()->right())->Integer32Value(); | 1193 HConstant::cast(instr->hydrogen()->right())->Integer32Value(); |
| 1185 int32_t test_value = 0; | 1194 int32_t test_value = 0; |
| (...skipping 343 matching lines...) |
| 1529 XMMRegister res = ToDoubleRegister(instr->result()); | 1538 XMMRegister res = ToDoubleRegister(instr->result()); |
| 1530 double v = instr->value(); | 1539 double v = instr->value(); |
| 1531 uint64_t int_val = BitCast<uint64_t, double>(v); | 1540 uint64_t int_val = BitCast<uint64_t, double>(v); |
| 1532 // Use xor to produce +0.0 in a fast and compact way, but avoid doing | 1541 // Use xor to produce +0.0 in a fast and compact way, but avoid doing |
| 1533 // so if the constant is -0.0. | 1542 // so if the constant is -0.0. |
| 1534 if (int_val == 0) { | 1543 if (int_val == 0) { |
| 1535 __ xorps(res, res); | 1544 __ xorps(res, res); |
| 1536 } else { | 1545 } else { |
| 1537 Register tmp = ToRegister(instr->temp()); | 1546 Register tmp = ToRegister(instr->temp()); |
| 1538 __ Set(tmp, int_val); | 1547 __ Set(tmp, int_val); |
| 1539 __ movq(res, tmp); | 1548 __k movq(res, tmp); |
| 1540 } | 1549 } |
| 1541 } | 1550 } |
| 1542 | 1551 |
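The `int_val == 0` test in DoConstantD is a bit-pattern test, not a numeric comparison: among all doubles only +0.0 encodes as all-zero bits, while -0.0 carries the sign bit, so it correctly takes the movq path instead of xorps. A quick standalone check (sketch, using memcpy in place of V8's BitCast):

```cpp
#include <cstdint>
#include <cstring>

uint64_t DoubleBits(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));  // type-pun like BitCast
  return bits;
}

int main() {
  return (DoubleBits(0.0) == 0 &&
          DoubleBits(-0.0) == 0x8000000000000000ull) ? 0 : 1;  // exits 0
}
```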
| 1543 | 1552 |
| 1544 void LCodeGen::DoConstantT(LConstantT* instr) { | 1553 void LCodeGen::DoConstantT(LConstantT* instr) { |
| 1545 Handle<Object> value = instr->value(); | 1554 Handle<Object> value = instr->value(); |
| 1546 AllowDeferredHandleDereference smi_check; | 1555 AllowDeferredHandleDereference smi_check; |
| 1547 if (value->IsSmi()) { | 1556 if (value->IsSmi()) { |
| 1548 __ Move(ToRegister(instr->result()), value); | 1557 __ Move(ToRegister(instr->result()), value); |
| 1549 } else { | 1558 } else { |
| (...skipping 67 matching lines...) |
| 1617 __ cmpq(kScratchRegister, FieldOperand(object, | 1626 __ cmpq(kScratchRegister, FieldOperand(object, |
| 1618 JSDate::kCacheStampOffset)); | 1627 JSDate::kCacheStampOffset)); |
| 1619 __ j(not_equal, &runtime, Label::kNear); | 1628 __ j(not_equal, &runtime, Label::kNear); |
| 1620 __ movq(result, FieldOperand(object, JSDate::kValueOffset + | 1629 __ movq(result, FieldOperand(object, JSDate::kValueOffset + |
| 1621 kPointerSize * index->value())); | 1630 kPointerSize * index->value())); |
| 1622 __ jmp(&done); | 1631 __ jmp(&done); |
| 1623 } | 1632 } |
| 1624 __ bind(&runtime); | 1633 __ bind(&runtime); |
| 1625 __ PrepareCallCFunction(2); | 1634 __ PrepareCallCFunction(2); |
| 1626 __ movq(arg_reg_1, object); | 1635 __ movq(arg_reg_1, object); |
| 1627 __ movq(arg_reg_2, index, RelocInfo::NONE64); | 1636 __n movq(arg_reg_2, index, RelocInfo::NONE64); |
| 1628 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); | 1637 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); |
| 1629 __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); | 1638 __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); |
| 1630 __ bind(&done); | 1639 __ bind(&done); |
| 1631 } | 1640 } |
| 1632 } | 1641 } |
| 1633 | 1642 |
| 1634 | 1643 |
| 1635 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { | 1644 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { |
| 1636 Register string = ToRegister(instr->string()); | 1645 Register string = ToRegister(instr->string()); |
| 1637 Register index = ToRegister(instr->index()); | 1646 Register index = ToRegister(instr->index()); |
| (...skipping 804 matching lines...) |
| 2442 Label* map_check) { | 2451 Label* map_check) { |
| 2443 { | 2452 { |
| 2444 PushSafepointRegistersScope scope(this); | 2453 PushSafepointRegistersScope scope(this); |
| 2445 InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>( | 2454 InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>( |
| 2446 InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck); | 2455 InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck); |
| 2447 InstanceofStub stub(flags); | 2456 InstanceofStub stub(flags); |
| 2448 | 2457 |
| 2449 __ push(ToRegister(instr->value())); | 2458 __ push(ToRegister(instr->value())); |
| 2450 __ PushHeapObject(instr->function()); | 2459 __ PushHeapObject(instr->function()); |
| 2451 | 2460 |
| 2461 #ifndef V8_TARGET_ARCH_X32 |
| 2452 static const int kAdditionalDelta = 10; | 2462 static const int kAdditionalDelta = 10; |
| 2463 #else |
| 2464 // Actual size for X32. |
| 2465 static const int kAdditionalDelta = 16; |
| 2466 #endif |
| 2453 int delta = | 2467 int delta = |
| 2454 masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta; | 2468 masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta; |
| 2455 ASSERT(delta >= 0); | 2469 ASSERT(delta >= 0); |
| 2456 __ push_imm32(delta); | 2470 __ push_imm32(delta); |
| 2457 | 2471 |
| 2458 // We are pushing three values on the stack but recording a | 2472 // We are pushing three values on the stack but recording a |
| 2459 // safepoint with two arguments because the stub is going to | 2473 // safepoint with two arguments because the stub is going to |
| 2460 // remove the third argument from the stack before jumping | 2474 // remove the third argument from the stack before jumping |
| 2461 // to the instanceof builtin on the slow path. | 2475 // to the instanceof builtin on the slow path. |
| 2462 CallCodeGeneric(stub.GetCode(isolate()), | 2476 CallCodeGeneric(stub.GetCode(isolate()), |
| (...skipping 72 matching lines...) |
| 2535 no_frame_start = masm_->pc_offset(); | 2549 no_frame_start = masm_->pc_offset(); |
| 2536 } | 2550 } |
| 2537 if (instr->has_constant_parameter_count()) { | 2551 if (instr->has_constant_parameter_count()) { |
| 2538 __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize, | 2552 __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize, |
| 2539 rcx); | 2553 rcx); |
| 2540 } else { | 2554 } else { |
| 2541 Register reg = ToRegister(instr->parameter_count()); | 2555 Register reg = ToRegister(instr->parameter_count()); |
| 2542 // The argument count parameter is a smi | 2556 // The argument count parameter is a smi |
| 2543 __ SmiToInteger32(reg, reg); | 2557 __ SmiToInteger32(reg, reg); |
| 2544 Register return_addr_reg = reg.is(rcx) ? rbx : rcx; | 2558 Register return_addr_reg = reg.is(rcx) ? rbx : rcx; |
| 2545 __ pop(return_addr_reg); | 2559 __k pop(return_addr_reg); |
| 2546 __ shl(reg, Immediate(kPointerSizeLog2)); | 2560 __ shl(reg, Immediate(kPointerSizeLog2)); |
| 2547 __ addq(rsp, reg); | 2561 __ addq(rsp, reg); |
| 2548 __ jmp(return_addr_reg); | 2562 __ jmp(return_addr_reg); |
| 2549 } | 2563 } |
| 2550 if (no_frame_start != -1) { | 2564 if (no_frame_start != -1) { |
| 2551 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); | 2565 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); |
| 2552 } | 2566 } |
| 2553 } | 2567 } |
| 2554 | 2568 |
| 2555 | 2569 |
| (...skipping 302 matching lines...) |
| 2858 | 2872 |
| 2859 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { | 2873 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { |
| 2860 Register arguments = ToRegister(instr->arguments()); | 2874 Register arguments = ToRegister(instr->arguments()); |
| 2861 Register result = ToRegister(instr->result()); | 2875 Register result = ToRegister(instr->result()); |
| 2862 | 2876 |
| 2863 if (instr->length()->IsConstantOperand() && | 2877 if (instr->length()->IsConstantOperand() && |
| 2864 instr->index()->IsConstantOperand()) { | 2878 instr->index()->IsConstantOperand()) { |
| 2865 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); | 2879 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); |
| 2866 int const_length = ToInteger32(LConstantOperand::cast(instr->length())); | 2880 int const_length = ToInteger32(LConstantOperand::cast(instr->length())); |
| 2867 int index = (const_length - const_index) + 1; | 2881 int index = (const_length - const_index) + 1; |
| 2882 #ifndef V8_TARGET_ARCH_X32 |
| 2868 __ movq(result, Operand(arguments, index * kPointerSize)); | 2883 __ movq(result, Operand(arguments, index * kPointerSize)); |
| 2884 #else |
| 2885 __ movl(result, Operand(arguments, index * kPointerSize + |
| 2886 2 * kHWRegSize - 2 * kPointerSize)); |
| 2887 #endif |
| 2869 } else { | 2888 } else { |
| 2870 Register length = ToRegister(instr->length()); | 2889 Register length = ToRegister(instr->length()); |
| 2871 // There are two words between the frame pointer and the last argument. | 2890 // There are two words between the frame pointer and the last argument. |
| 2872 // Subtracting from length accounts for one of them; add one more. | 2891 // Subtracting from length accounts for one of them; add one more. |
| 2873 if (instr->index()->IsRegister()) { | 2892 if (instr->index()->IsRegister()) { |
| 2874 __ subl(length, ToRegister(instr->index())); | 2893 __ subl(length, ToRegister(instr->index())); |
| 2875 } else { | 2894 } else { |
| 2876 __ subl(length, ToOperand(instr->index())); | 2895 __ subl(length, ToOperand(instr->index())); |
| 2877 } | 2896 } |
| 2897 #ifndef V8_TARGET_ARCH_X32 |
| 2878 __ movq(result, | 2898 __ movq(result, |
| 2879 Operand(arguments, length, times_pointer_size, kPointerSize)); | 2899 Operand(arguments, length, times_pointer_size, kPointerSize)); |
| 2900 #else |
| 2901 // The saved PC and FP each occupy a kHWRegSize slot. |
| 2902 __ movl(result, |
| 2903 Operand(arguments, length, times_pointer_size, |
| 2904 2 * kHWRegSize - 1 * kPointerSize)); |
| 2905 #endif |
| 2880 } | 2906 } |
| 2881 } | 2907 } |
| 2882 | 2908 |
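Both X32 branches in DoAccessArgumentsAt adjust for the same fact: between the `arguments` base and the argument slots sit the saved frame pointer and the return address, which stay 8-byte hardware slots on X32 even though argument slots shrink to 4 bytes. A sketch of the offset math, with slot sizes assumed from the constant names (the register-index form folds one slot into `length`, which is why it ends in `- 1 * kPointerSize` instead):

```cpp
// Illustrative only; not V8 code.
constexpr int kPointerSize = 4;  // X32 argument slot
constexpr int kHWRegSize = 8;    // saved FP / return-address slot

int ArgOffsetX64(int index) {    // every slot is pointer-sized (8 bytes)
  return index * 8;
}
int ArgOffsetX32(int index) {    // two 8-byte hardware slots replace two
  return index * kPointerSize +  // of the 4-byte slots `index` counted
         2 * kHWRegSize - 2 * kPointerSize;
}
```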
| 2883 | 2909 |
| 2884 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { | 2910 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { |
| 2885 ElementsKind elements_kind = instr->elements_kind(); | 2911 ElementsKind elements_kind = instr->elements_kind(); |
| 2886 LOperand* key = instr->key(); | 2912 LOperand* key = instr->key(); |
| 2887 if (!key->IsConstantOperand()) { | 2913 if (!key->IsConstantOperand()) { |
| 2888 Register key_reg = ToRegister(key); | 2914 Register key_reg = ToRegister(key); |
| 2889 // Even though the HLoad/StoreKeyed (in this case) instructions force | 2915 // Even though the HLoad/StoreKeyed (in this case) instructions force |
| 2890 // the input representation for the key to be an integer, the input | 2916 // the input representation for the key to be an integer, the input |
| 2891 // gets replaced during bound check elimination with the index argument | 2917 // gets replaced during bound check elimination with the index argument |
| 2892 // to the bounds check, which can be tagged, so that case must be | 2918 // to the bounds check, which can be tagged, so that case must be |
| 2893 // handled here, too. | 2919 // handled here, too. |
| 2894 if (instr->hydrogen()->key()->representation().IsSmi()) { | 2920 if (instr->hydrogen()->key()->representation().IsSmi()) { |
| 2895 __ SmiToInteger64(key_reg, key_reg); | 2921 __ SmiToInteger64(key_reg, key_reg); |
| 2896 } else if (instr->hydrogen()->IsDehoisted()) { | 2922 } else if (instr->hydrogen()->IsDehoisted()) { |
| 2897 // Sign extend key because it could be a 32 bit negative value | 2923 // Sign extend key because it could be a 32 bit negative value |
| 2898 // and the dehoisted address computation happens in 64 bits | 2924 // and the dehoisted address computation happens in 64 bits |
| 2899 __ movsxlq(key_reg, key_reg); | 2925 __k movsxlq(key_reg, key_reg); |
| 2900 } | 2926 } |
| 2901 } | 2927 } |
| 2902 Operand operand(BuildFastArrayOperand( | 2928 Operand operand(BuildFastArrayOperand( |
| 2903 instr->elements(), | 2929 instr->elements(), |
| 2904 key, | 2930 key, |
| 2905 elements_kind, | 2931 elements_kind, |
| 2906 0, | 2932 0, |
| 2907 instr->additional_index())); | 2933 instr->additional_index())); |
| 2908 | 2934 |
| 2909 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | 2935 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
| (...skipping 52 matching lines...) |
| 2962 Register key_reg = ToRegister(key); | 2988 Register key_reg = ToRegister(key); |
| 2963 // Even though the HLoad/StoreKeyed instructions force the input | 2989 // Even though the HLoad/StoreKeyed instructions force the input |
| 2964 // representation for the key to be an integer, the input gets replaced | 2990 // representation for the key to be an integer, the input gets replaced |
| 2965 // during bound check elimination with the index argument to the bounds | 2991 // during bound check elimination with the index argument to the bounds |
| 2966 // check, which can be tagged, so that case must be handled here, too. | 2992 // check, which can be tagged, so that case must be handled here, too. |
| 2967 if (instr->hydrogen()->key()->representation().IsSmi()) { | 2993 if (instr->hydrogen()->key()->representation().IsSmi()) { |
| 2968 __ SmiToInteger64(key_reg, key_reg); | 2994 __ SmiToInteger64(key_reg, key_reg); |
| 2969 } else if (instr->hydrogen()->IsDehoisted()) { | 2995 } else if (instr->hydrogen()->IsDehoisted()) { |
| 2970 // Sign extend key because it could be a 32 bit negative value | 2996 // Sign extend key because it could be a 32 bit negative value |
| 2971 // and the dehoisted address computation happens in 64 bits | 2997 // and the dehoisted address computation happens in 64 bits |
| 2972 __ movsxlq(key_reg, key_reg); | 2998 __k movsxlq(key_reg, key_reg); |
| 2973 } | 2999 } |
| 2974 } | 3000 } |
| 2975 | 3001 |
| 2976 if (instr->hydrogen()->RequiresHoleCheck()) { | 3002 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2977 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + | 3003 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + |
| 2978 sizeof(kHoleNanLower32); | 3004 sizeof(kHoleNanLower32); |
| 2979 Operand hole_check_operand = BuildFastArrayOperand( | 3005 Operand hole_check_operand = BuildFastArrayOperand( |
| 2980 instr->elements(), | 3006 instr->elements(), |
| 2981 key, | 3007 key, |
| 2982 FAST_DOUBLE_ELEMENTS, | 3008 FAST_DOUBLE_ELEMENTS, |
| (...skipping 21 matching lines...) |
| 3004 // Even though the HLoad/StoreKeyedFastElement instructions force | 3030 // Even though the HLoad/StoreKeyedFastElement instructions force |
| 3005 // the input representation for the key to be an integer, the input | 3031 // the input representation for the key to be an integer, the input |
| 3006 // gets replaced during bound check elimination with the index | 3032 // gets replaced during bound check elimination with the index |
| 3007 // argument to the bounds check, which can be tagged, so that | 3033 // argument to the bounds check, which can be tagged, so that |
| 3008 // case must be handled here, too. | 3034 // case must be handled here, too. |
| 3009 if (instr->hydrogen()->key()->representation().IsSmi()) { | 3035 if (instr->hydrogen()->key()->representation().IsSmi()) { |
| 3010 __ SmiToInteger64(key_reg, key_reg); | 3036 __ SmiToInteger64(key_reg, key_reg); |
| 3011 } else if (instr->hydrogen()->IsDehoisted()) { | 3037 } else if (instr->hydrogen()->IsDehoisted()) { |
| 3012 // Sign extend key because it could be a 32 bit negative value | 3038 // Sign extend key because it could be a 32 bit negative value |
| 3013 // and the dehoisted address computation happens in 64 bits | 3039 // and the dehoisted address computation happens in 64 bits |
| 3014 __ movsxlq(key_reg, key_reg); | 3040 __k movsxlq(key_reg, key_reg); |
| 3015 } | 3041 } |
| 3016 } | 3042 } |
| 3017 | 3043 |
| 3018 // Load the result. | 3044 // Load the result. |
| 3019 __ movq(result, | 3045 __ movq(result, |
| 3020 BuildFastArrayOperand(instr->elements(), | 3046 BuildFastArrayOperand(instr->elements(), |
| 3021 key, | 3047 key, |
| 3022 FAST_ELEMENTS, | 3048 FAST_ELEMENTS, |
| 3023 FixedArray::kHeaderSize - kHeapObjectTag, | 3049 FixedArray::kHeaderSize - kHeapObjectTag, |
| 3024 instr->additional_index())); | 3050 instr->additional_index())); |
| (...skipping 54 matching lines...) |
| 3079 | 3105 |
| 3080 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); | 3106 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); |
| 3081 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 3107 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 3082 } | 3108 } |
| 3083 | 3109 |
| 3084 | 3110 |
| 3085 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { | 3111 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { |
| 3086 Register result = ToRegister(instr->result()); | 3112 Register result = ToRegister(instr->result()); |
| 3087 | 3113 |
| 3088 if (instr->hydrogen()->from_inlined()) { | 3114 if (instr->hydrogen()->from_inlined()) { |
| 3089 __ lea(result, Operand(rsp, -2 * kPointerSize)); | 3115 __q lea(result, Operand(rsp, -2 * kPointerSize)); |
| 3090 } else { | 3116 } else { |
| 3091 // Check for arguments adapter frame. | 3117 // Check for arguments adapter frame. |
| 3092 Label done, adapted; | 3118 Label done, adapted; |
| 3093 __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); | 3119 __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); |
| 3094 __ Cmp(Operand(result, StandardFrameConstants::kContextOffset), | 3120 __ Cmp(Operand(result, StandardFrameConstants::kContextOffset), |
| 3095 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); | 3121 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); |
| 3096 __ j(equal, &adapted, Label::kNear); | 3122 __ j(equal, &adapted, Label::kNear); |
| 3097 | 3123 |
| 3098 // No arguments adaptor frame. | 3124 // No arguments adaptor frame. |
| 3099 __ movq(result, rbp); | 3125 __ movq(result, rbp); |
| (...skipping 101 matching lines...) |
| 3201 __ push(receiver); | 3227 __ push(receiver); |
| 3202 __ movq(receiver, length); | 3228 __ movq(receiver, length); |
| 3203 | 3229 |
| 3204 // Loop through the arguments pushing them onto the execution | 3230 // Loop through the arguments pushing them onto the execution |
| 3205 // stack. | 3231 // stack. |
| 3206 Label invoke, loop; | 3232 Label invoke, loop; |
| 3207 // length is a small non-negative integer, due to the test above. | 3233 // length is a small non-negative integer, due to the test above. |
| 3208 __ testl(length, length); | 3234 __ testl(length, length); |
| 3209 __ j(zero, &invoke, Label::kNear); | 3235 __ j(zero, &invoke, Label::kNear); |
| 3210 __ bind(&loop); | 3236 __ bind(&loop); |
| 3237 #ifndef V8_TARGET_ARCH_X32 |
| 3211 __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize)); | 3238 __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize)); |
| 3239 #else |
| 3240 __ Push(Operand(elements, length, times_pointer_size, |
| 3241 2 * kHWRegSize - 1 * kPointerSize)); |
| 3242 #endif |
| 3212 __ decl(length); | 3243 __ decl(length); |
| 3213 __ j(not_zero, &loop); | 3244 __ j(not_zero, &loop); |
| 3214 | 3245 |
| 3215 // Invoke the function. | 3246 // Invoke the function. |
| 3216 __ bind(&invoke); | 3247 __ bind(&invoke); |
| 3217 ASSERT(instr->HasPointerMap()); | 3248 ASSERT(instr->HasPointerMap()); |
| 3218 LPointerMap* pointers = instr->pointer_map(); | 3249 LPointerMap* pointers = instr->pointer_map(); |
| 3219 RecordPosition(pointers->position()); | 3250 RecordPosition(pointers->position()); |
| 3220 SafepointGenerator safepoint_generator( | 3251 SafepointGenerator safepoint_generator( |
| 3221 this, pointers, Safepoint::kLazyDeopt); | 3252 this, pointers, Safepoint::kLazyDeopt); |
| (...skipping 82 matching lines...) |
| 3304 // is available to write to at this point. | 3335 // is available to write to at this point. |
| 3305 if (dont_adapt_arguments) { | 3336 if (dont_adapt_arguments) { |
| 3306 __ Set(rax, arity); | 3337 __ Set(rax, arity); |
| 3307 } | 3338 } |
| 3308 | 3339 |
| 3309 // Invoke function. | 3340 // Invoke function. |
| 3310 __ SetCallKind(rcx, call_kind); | 3341 __ SetCallKind(rcx, call_kind); |
| 3311 if (function.is_identical_to(info()->closure())) { | 3342 if (function.is_identical_to(info()->closure())) { |
| 3312 __ CallSelf(); | 3343 __ CallSelf(); |
| 3313 } else { | 3344 } else { |
| 3345 #ifndef V8_TARGET_ARCH_X32 |
| 3314 __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset)); | 3346 __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset)); |
| 3347 #else |
| 3348 __ Call(FieldOperand(rdi, JSFunction::kCodeEntryOffset)); |
| 3349 #endif |
| 3315 } | 3350 } |
| 3316 | 3351 |
| 3317 // Set up deoptimization. | 3352 // Set up deoptimization. |
| 3318 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0); | 3353 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0); |
| 3319 } else { | 3354 } else { |
| 3320 // We need to adapt arguments. | 3355 // We need to adapt arguments. |
| 3321 SafepointGenerator generator( | 3356 SafepointGenerator generator( |
| 3322 this, pointers, Safepoint::kLazyDeopt); | 3357 this, pointers, Safepoint::kLazyDeopt); |
| 3323 ParameterCount count(arity); | 3358 ParameterCount count(arity); |
| 3324 ParameterCount expected(formal_parameter_count); | 3359 ParameterCount expected(formal_parameter_count); |
| (...skipping 52 matching lines...) |
| 3377 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); | 3412 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); |
| 3378 // Set the pointer to the new heap number in tmp. | 3413 // Set the pointer to the new heap number in tmp. |
| 3379 if (!tmp.is(rax)) { | 3414 if (!tmp.is(rax)) { |
| 3380 __ movq(tmp, rax); | 3415 __ movq(tmp, rax); |
| 3381 } | 3416 } |
| 3382 | 3417 |
| 3383 // Restore input_reg after call to runtime. | 3418 // Restore input_reg after call to runtime. |
| 3384 __ LoadFromSafepointRegisterSlot(input_reg, input_reg); | 3419 __ LoadFromSafepointRegisterSlot(input_reg, input_reg); |
| 3385 | 3420 |
| 3386 __ bind(&allocated); | 3421 __ bind(&allocated); |
| 3387 __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 3422 __k movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 3388 __ shl(tmp2, Immediate(1)); | 3423 __k shl(tmp2, Immediate(1)); |
| 3389 __ shr(tmp2, Immediate(1)); | 3424 __k shr(tmp2, Immediate(1)); |
| 3390 __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2); | 3425 __k movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2); |
| 3391 __ StoreToSafepointRegisterSlot(input_reg, tmp); | 3426 __ StoreToSafepointRegisterSlot(input_reg, tmp); |
| 3392 | 3427 |
| 3393 __ bind(&done); | 3428 __ bind(&done); |
| 3394 } | 3429 } |
| 3395 | 3430 |
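The `shl` / `shr` by one immediate above is a branch-free fabs on the double's raw bits: shifting left by one drops the IEEE-754 sign bit off the top, and the logical shift right moves exponent and mantissa back into place with the sign cleared. A scalar model (sketch, not V8 code):

```cpp
#include <cstdint>
#include <cstring>

double FabsViaBits(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));
  bits = (bits << 1) >> 1;  // clear bit 63, the sign bit
  std::memcpy(&v, &bits, sizeof(bits));
  return v;
}

int main() {
  return FabsViaBits(-1.5) == 1.5 ? 0 : 1;  // exits 0
}
```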
| 3396 | 3431 |
| 3397 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { | 3432 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { |
| 3398 Register input_reg = ToRegister(instr->value()); | 3433 Register input_reg = ToRegister(instr->value()); |
| 3399 __ testl(input_reg, input_reg); | 3434 __ testl(input_reg, input_reg); |
| 3400 Label is_positive; | 3435 Label is_positive; |
| (...skipping 45 matching lines...) |
| 3446 | 3481 |
| 3447 void LCodeGen::DoMathFloor(LMathFloor* instr) { | 3482 void LCodeGen::DoMathFloor(LMathFloor* instr) { |
| 3448 XMMRegister xmm_scratch = xmm0; | 3483 XMMRegister xmm_scratch = xmm0; |
| 3449 Register output_reg = ToRegister(instr->result()); | 3484 Register output_reg = ToRegister(instr->result()); |
| 3450 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3485 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 3451 | 3486 |
| 3452 if (CpuFeatures::IsSupported(SSE4_1)) { | 3487 if (CpuFeatures::IsSupported(SSE4_1)) { |
| 3453 CpuFeatureScope scope(masm(), SSE4_1); | 3488 CpuFeatureScope scope(masm(), SSE4_1); |
| 3454 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3489 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3455 // Deoptimize if minus zero. | 3490 // Deoptimize if minus zero. |
| 3456 __ movq(output_reg, input_reg); | 3491 __k movq(output_reg, input_reg); |
| 3457 __ subq(output_reg, Immediate(1)); | 3492 __k subq(output_reg, Immediate(1)); |
| 3458 DeoptimizeIf(overflow, instr->environment()); | 3493 DeoptimizeIf(overflow, instr->environment()); |
| 3459 } | 3494 } |
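The minus-zero check above is another bit trick: `movq` copies the double's raw encoding into the result register and the `subq` of 1 deoptimizes on signed overflow. The only 64-bit value for which x - 1 overflows is INT64_MIN (0x8000000000000000), which is precisely the encoding of -0.0, the one input whose floor must bail out here. A standalone check of that claim:

```cpp
#include <cstdint>
#include <cstring>

int main() {
  double minus_zero = -0.0;
  int64_t bits;
  std::memcpy(&bits, &minus_zero, sizeof(bits));
  // subq(bits, Immediate(1)) sets the overflow flag only for this pattern.
  return bits == INT64_MIN ? 0 : 1;  // exits 0
}
```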
| 3460 __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown); | 3495 __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown); |
| 3461 __ cvttsd2si(output_reg, xmm_scratch); | 3496 __ cvttsd2si(output_reg, xmm_scratch); |
| 3462 __ cmpl(output_reg, Immediate(0x80000000)); | 3497 __ cmpl(output_reg, Immediate(0x80000000)); |
| 3463 DeoptimizeIf(equal, instr->environment()); | 3498 DeoptimizeIf(equal, instr->environment()); |
| 3464 } else { | 3499 } else { |
| 3465 Label negative_sign, done; | 3500 Label negative_sign, done; |
| 3466 // Deoptimize on unordered. | 3501 // Deoptimize on unordered. |
| 3467 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. | 3502 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. |
| 3468 __ ucomisd(input_reg, xmm_scratch); | 3503 __ ucomisd(input_reg, xmm_scratch); |
| 3469 DeoptimizeIf(parity_even, instr->environment()); | 3504 DeoptimizeIf(parity_even, instr->environment()); |
| 3470 __ j(below, &negative_sign, Label::kNear); | 3505 __ j(below, &negative_sign, Label::kNear); |
| 3471 | 3506 |
| 3472 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3507 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3473 // Check for negative zero. | 3508 // Check for negative zero. |
| 3474 Label positive_sign; | 3509 Label positive_sign; |
| 3475 __ j(above, &positive_sign, Label::kNear); | 3510 __ j(above, &positive_sign, Label::kNear); |
| 3476 __ movmskpd(output_reg, input_reg); | 3511 __ movmskpd(output_reg, input_reg); |
| 3477 __ testq(output_reg, Immediate(1)); | 3512 __k testq(output_reg, Immediate(1)); |
| 3478 DeoptimizeIf(not_zero, instr->environment()); | 3513 DeoptimizeIf(not_zero, instr->environment()); |
| 3479 __ Set(output_reg, 0); | 3514 __ Set(output_reg, 0); |
| 3480 __ jmp(&done); | 3515 __ jmp(&done); |
| 3481 __ bind(&positive_sign); | 3516 __ bind(&positive_sign); |
| 3482 } | 3517 } |
| 3483 | 3518 |
| 3484 // Use truncating instruction (OK because input is positive). | 3519 // Use truncating instruction (OK because input is positive). |
| 3485 __ cvttsd2si(output_reg, input_reg); | 3520 __ cvttsd2si(output_reg, input_reg); |
| 3486 // Overflow is signalled with minint. | 3521 // Overflow is signalled with minint. |
| 3487 __ cmpl(output_reg, Immediate(0x80000000)); | 3522 __ cmpl(output_reg, Immediate(0x80000000)); |
| (...skipping 16 matching lines...) |
| 3504 | 3539 |
| 3505 | 3540 |
| 3506 void LCodeGen::DoMathRound(LMathRound* instr) { | 3541 void LCodeGen::DoMathRound(LMathRound* instr) { |
| 3507 const XMMRegister xmm_scratch = xmm0; | 3542 const XMMRegister xmm_scratch = xmm0; |
| 3508 Register output_reg = ToRegister(instr->result()); | 3543 Register output_reg = ToRegister(instr->result()); |
| 3509 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3544 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 3510 static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5 | 3545 static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5 |
| 3511 static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5 | 3546 static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5 |
| 3512 | 3547 |
| 3513 Label done, round_to_zero, below_one_half, do_not_compensate, restore; | 3548 Label done, round_to_zero, below_one_half, do_not_compensate, restore; |
| 3514 __ movq(kScratchRegister, one_half, RelocInfo::NONE64); | 3549 __k movq(kScratchRegister, one_half, RelocInfo::NONE64); |
| 3515 __ movq(xmm_scratch, kScratchRegister); | 3550 __ movq(xmm_scratch, kScratchRegister); |
| 3516 __ ucomisd(xmm_scratch, input_reg); | 3551 __ ucomisd(xmm_scratch, input_reg); |
| 3517 __ j(above, &below_one_half); | 3552 __ j(above, &below_one_half); |
| 3518 | 3553 |
| 3519 // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x). | 3554 // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x). |
| 3520 __ addsd(xmm_scratch, input_reg); | 3555 __ addsd(xmm_scratch, input_reg); |
| 3521 __ cvttsd2si(output_reg, xmm_scratch); | 3556 __ cvttsd2si(output_reg, xmm_scratch); |
| 3522 // Overflow is signalled with minint. | 3557 // Overflow is signalled with minint. |
| 3523 __ cmpl(output_reg, Immediate(0x80000000)); | 3558 __ cmpl(output_reg, Immediate(0x80000000)); |
| 3524 __ RecordComment("D2I conversion overflow"); | 3559 __ RecordComment("D2I conversion overflow"); |
| 3525 DeoptimizeIf(equal, instr->environment()); | 3560 DeoptimizeIf(equal, instr->environment()); |
| 3526 __ jmp(&done); | 3561 __ jmp(&done); |
| 3527 | 3562 |
| 3528 __ bind(&below_one_half); | 3563 __ bind(&below_one_half); |
| 3529 __ movq(kScratchRegister, minus_one_half, RelocInfo::NONE64); | 3564 __k movq(kScratchRegister, minus_one_half, RelocInfo::NONE64); |
| 3530 __ movq(xmm_scratch, kScratchRegister); | 3565 __ movq(xmm_scratch, kScratchRegister); |
| 3531 __ ucomisd(xmm_scratch, input_reg); | 3566 __ ucomisd(xmm_scratch, input_reg); |
| 3532 __ j(below_equal, &round_to_zero); | 3567 __ j(below_equal, &round_to_zero); |
| 3533 | 3568 |
| 3534 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then | 3569 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then |
| 3535 // compare and compensate. | 3570 // compare and compensate. |
| 3536 __ movq(kScratchRegister, input_reg); // Back up input_reg. | 3571 __k movq(kScratchRegister, input_reg); // Back up input_reg. |
| 3537 __ subsd(input_reg, xmm_scratch); | 3572 __ subsd(input_reg, xmm_scratch); |
| 3538 __ cvttsd2si(output_reg, input_reg); | 3573 __ cvttsd2si(output_reg, input_reg); |
| 3539 // Catch minint due to overflow, and to prevent overflow when compensating. | 3574 // Catch minint due to overflow, and to prevent overflow when compensating. |
| 3540 __ cmpl(output_reg, Immediate(0x80000000)); | 3575 __ cmpl(output_reg, Immediate(0x80000000)); |
| 3541 __ RecordComment("D2I conversion overflow"); | 3576 __ RecordComment("D2I conversion overflow"); |
| 3542 DeoptimizeIf(equal, instr->environment()); | 3577 DeoptimizeIf(equal, instr->environment()); |
| 3543 | 3578 |
| 3544 __ cvtlsi2sd(xmm_scratch, output_reg); | 3579 __ cvtlsi2sd(xmm_scratch, output_reg); |
| 3545 __ ucomisd(input_reg, xmm_scratch); | 3580 __ ucomisd(input_reg, xmm_scratch); |
| 3546 __ j(equal, &restore, Label::kNear); | 3581 __ j(equal, &restore, Label::kNear); |
| 3547 __ subl(output_reg, Immediate(1)); | 3582 __ subl(output_reg, Immediate(1)); |
| 3548 // No overflow because we already ruled out minint. | 3583 // No overflow because we already ruled out minint. |
| 3549 __ bind(&restore); | 3584 __ bind(&restore); |
| 3550 __ movq(input_reg, kScratchRegister); // Restore input_reg. | 3585 __k movq(input_reg, kScratchRegister); // Restore input_reg. |
| 3551 __ jmp(&done); | 3586 __ jmp(&done); |
| 3552 | 3587 |
| 3553 __ bind(&round_to_zero); | 3588 __ bind(&round_to_zero); |
| 3554 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if | 3589 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if |
| 3555 // we can ignore the difference between a result of -0 and +0. | 3590 // we can ignore the difference between a result of -0 and +0. |
| 3556 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3591 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3557 __ movq(output_reg, input_reg); | 3592 __k movq(output_reg, input_reg); |
| 3558 __ testq(output_reg, output_reg); | 3593 __k testq(output_reg, output_reg); |
| 3559 __ RecordComment("Minus zero"); | 3594 __ RecordComment("Minus zero"); |
| 3560 DeoptimizeIf(negative, instr->environment()); | 3595 DeoptimizeIf(negative, instr->environment()); |
| 3561 } | 3596 } |
| 3562 __ Set(output_reg, 0); | 3597 __ Set(output_reg, 0); |
| 3563 __ bind(&done); | 3598 __ bind(&done); |
| 3564 } | 3599 } |
| 3565 | 3600 |
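The `below_one_half` path implements floor(x + 0.5) for x < -0.5 using only truncation: `cvttsd2si` rounds toward zero, which for negative values is a ceiling, so the code compares the truncated result back against x + 0.5 and subtracts 1 whenever a fractional part was dropped. A scalar model of that path (my reconstruction, not V8 code):

```cpp
#include <cstdint>

int32_t RoundNegative(double x) {           // models the x < -0.5 path
  double t = x + 0.5;                       // x - (-0.5) in the code above
  int32_t r = static_cast<int32_t>(t);      // cvttsd2si: truncate to zero
  if (static_cast<double>(r) != t) r -= 1;  // compensate: ceil -> floor
  return r;
}

int main() {
  return (RoundNegative(-2.5) == -2 &&         // halfway rounds toward +inf
          RoundNegative(-2.6) == -3) ? 0 : 1;  // exits 0
}
```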
| 3566 | 3601 |
| 3567 void LCodeGen::DoMathSqrt(LMathSqrt* instr) { | 3602 void LCodeGen::DoMathSqrt(LMathSqrt* instr) { |
| 3568 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3603 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| (...skipping 90 matching lines...) |
| 3659 // calling convention. | 3694 // calling convention. |
| 3660 #ifdef _WIN64 | 3695 #ifdef _WIN64 |
| 3661 ASSERT(ToRegister(instr->global_object()).is(rcx)); | 3696 ASSERT(ToRegister(instr->global_object()).is(rcx)); |
| 3662 Register global_object = rcx; | 3697 Register global_object = rcx; |
| 3663 #else | 3698 #else |
| 3664 ASSERT(ToRegister(instr->global_object()).is(rdi)); | 3699 ASSERT(ToRegister(instr->global_object()).is(rdi)); |
| 3665 Register global_object = rdi; | 3700 Register global_object = rdi; |
| 3666 #endif | 3701 #endif |
| 3667 | 3702 |
| 3668 static const int kSeedSize = sizeof(uint32_t); | 3703 static const int kSeedSize = sizeof(uint32_t); |
| 3704 #ifndef V8_TARGET_ARCH_X32 |
| 3669 STATIC_ASSERT(kPointerSize == 2 * kSeedSize); | 3705 STATIC_ASSERT(kPointerSize == 2 * kSeedSize); |
| 3706 #endif |
| 3670 | 3707 |
| 3671 __ movq(global_object, | 3708 __ movq(global_object, |
| 3672 FieldOperand(global_object, GlobalObject::kNativeContextOffset)); | 3709 FieldOperand(global_object, GlobalObject::kNativeContextOffset)); |
| 3673 static const int kRandomSeedOffset = | 3710 static const int kRandomSeedOffset = |
| 3674 FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize; | 3711 FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize; |
| 3675 __ movq(rbx, FieldOperand(global_object, kRandomSeedOffset)); | 3712 __ movq(rbx, FieldOperand(global_object, kRandomSeedOffset)); |
| 3676 // rbx: FixedArray of the native context's random seeds | 3713 // rbx: FixedArray of the native context's random seeds |
| 3677 | 3714 |
| 3678 // Load state[0]. | 3715 // Load state[0]. |
| 3679 __ movl(rax, FieldOperand(rbx, ByteArray::kHeaderSize)); | 3716 __ movl(rax, FieldOperand(rbx, ByteArray::kHeaderSize)); |
| (...skipping 397 matching lines...) |
| 4077 // Even though the HLoad/StoreKeyedFastElement instructions force | 4114 // Even though the HLoad/StoreKeyedFastElement instructions force |
| 4078 // the input representation for the key to be an integer, the input | 4115 // the input representation for the key to be an integer, the input |
| 4079 // gets replaced during bound check elimination with the index | 4116 // gets replaced during bound check elimination with the index |
| 4080 // argument to the bounds check, which can be tagged, so that case | 4117 // argument to the bounds check, which can be tagged, so that case |
| 4081 // must be handled here, too. | 4118 // must be handled here, too. |
| 4082 if (instr->hydrogen()->key()->representation().IsSmi()) { | 4119 if (instr->hydrogen()->key()->representation().IsSmi()) { |
| 4083 __ SmiToInteger64(key_reg, key_reg); | 4120 __ SmiToInteger64(key_reg, key_reg); |
| 4084 } else if (instr->hydrogen()->IsDehoisted()) { | 4121 } else if (instr->hydrogen()->IsDehoisted()) { |
| 4085 // Sign extend key because it could be a 32 bit negative value | 4122 // Sign extend key because it could be a 32 bit negative value |
| 4086 // and the dehoisted address computation happens in 64 bits | 4123 // and the dehoisted address computation happens in 64 bits |
| 4087 __ movsxlq(key_reg, key_reg); | 4124 __k movsxlq(key_reg, key_reg); |
| 4088 } | 4125 } |
| 4089 } | 4126 } |
| 4090 Operand operand(BuildFastArrayOperand( | 4127 Operand operand(BuildFastArrayOperand( |
| 4091 instr->elements(), | 4128 instr->elements(), |
| 4092 key, | 4129 key, |
| 4093 elements_kind, | 4130 elements_kind, |
| 4094 0, | 4131 0, |
| 4095 instr->additional_index())); | 4132 instr->additional_index())); |
| 4096 | 4133 |
| 4097 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | 4134 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
| (...skipping 43 matching lines...) |
| 4141 // Even though the HLoad/StoreKeyedFastElement instructions force | 4178 // Even though the HLoad/StoreKeyedFastElement instructions force |
| 4142 // the input representation for the key to be an integer, the | 4179 // the input representation for the key to be an integer, the |
| 4143 // input gets replaced during bound check elimination with the index | 4180 // input gets replaced during bound check elimination with the index |
| 4144 // argument to the bounds check, which can be tagged, so that case | 4181 // argument to the bounds check, which can be tagged, so that case |
| 4145 // must be handled here, too. | 4182 // must be handled here, too. |
| 4146 if (instr->hydrogen()->key()->representation().IsSmi()) { | 4183 if (instr->hydrogen()->key()->representation().IsSmi()) { |
| 4147 __ SmiToInteger64(key_reg, key_reg); | 4184 __ SmiToInteger64(key_reg, key_reg); |
| 4148 } else if (instr->hydrogen()->IsDehoisted()) { | 4185 } else if (instr->hydrogen()->IsDehoisted()) { |
| 4149 // Sign extend key because it could be a 32 bit negative value | 4186 // Sign extend key because it could be a 32 bit negative value |
| 4150 // and the dehoisted address computation happens in 64 bits | 4187 // and the dehoisted address computation happens in 64 bits |
| 4151 __ movsxlq(key_reg, key_reg); | 4188 __k movsxlq(key_reg, key_reg); |
| 4152 } | 4189 } |
| 4153 } | 4190 } |
| 4154 | 4191 |
| 4155 if (instr->NeedsCanonicalization()) { | 4192 if (instr->NeedsCanonicalization()) { |
| 4156 Label have_value; | 4193 Label have_value; |
| 4157 | 4194 |
| 4158 __ ucomisd(value, value); | 4195 __ ucomisd(value, value); |
| 4159 __ j(parity_odd, &have_value); // NaN. | 4196 __ j(parity_odd, &have_value); // NaN. |
| 4160 | 4197 |
| 4161 __ Set(kScratchRegister, BitCast<uint64_t>( | 4198 __ Set(kScratchRegister, BitCast<uint64_t>( |
| 4162 FixedDoubleArray::canonical_not_the_hole_nan_as_double())); | 4199 FixedDoubleArray::canonical_not_the_hole_nan_as_double())); |
| 4163 __ movq(value, kScratchRegister); | 4200 __k movq(value, kScratchRegister); |
| 4164 | 4201 |
| 4165 __ bind(&have_value); | 4202 __ bind(&have_value); |
| 4166 } | 4203 } |
| 4167 | 4204 |
| 4168 Operand double_store_operand = BuildFastArrayOperand( | 4205 Operand double_store_operand = BuildFastArrayOperand( |
| 4169 instr->elements(), | 4206 instr->elements(), |
| 4170 key, | 4207 key, |
| 4171 FAST_DOUBLE_ELEMENTS, | 4208 FAST_DOUBLE_ELEMENTS, |
| 4172 FixedDoubleArray::kHeaderSize - kHeapObjectTag, | 4209 FixedDoubleArray::kHeaderSize - kHeapObjectTag, |
| 4173 instr->additional_index()); | 4210 instr->additional_index()); |
| (...skipping 10 matching lines...) |
| 4184 // Even though the HLoad/StoreKeyedFastElement instructions force | 4221 // Even though the HLoad/StoreKeyedFastElement instructions force |
| 4185 // the input representation for the key to be an integer, the | 4222 // the input representation for the key to be an integer, the |
| 4186 // input gets replaced during bound check elimination with the index | 4223 // input gets replaced during bound check elimination with the index |
| 4187 // argument to the bounds check, which can be tagged, so that case | 4224 // argument to the bounds check, which can be tagged, so that case |
| 4188 // must be handled here, too. | 4225 // must be handled here, too. |
| 4189 if (instr->hydrogen()->key()->representation().IsSmi()) { | 4226 if (instr->hydrogen()->key()->representation().IsSmi()) { |
| 4190 __ SmiToInteger64(key_reg, key_reg); | 4227 __ SmiToInteger64(key_reg, key_reg); |
| 4191 } else if (instr->hydrogen()->IsDehoisted()) { | 4228 } else if (instr->hydrogen()->IsDehoisted()) { |
| 4192 // Sign extend key because it could be a 32 bit negative value | 4229 // Sign extend key because it could be a 32 bit negative value |
| 4193 // and the dehoisted address computation happens in 64 bits | 4230 // and the dehoisted address computation happens in 64 bits |
| 4194 __ movsxlq(key_reg, key_reg); | 4231 __k movsxlq(key_reg, key_reg); |
| 4195 } | 4232 } |
| 4196 } | 4233 } |
| 4197 | 4234 |
| 4198 Operand operand = | 4235 Operand operand = |
| 4199 BuildFastArrayOperand(instr->elements(), | 4236 BuildFastArrayOperand(instr->elements(), |
| 4200 key, | 4237 key, |
| 4201 FAST_ELEMENTS, | 4238 FAST_ELEMENTS, |
| 4202 FixedArray::kHeaderSize - kHeapObjectTag, | 4239 FixedArray::kHeaderSize - kHeapObjectTag, |
| 4203 instr->additional_index()); | 4240 instr->additional_index()); |
| 4204 if (instr->value()->IsRegister()) { | 4241 if (instr->value()->IsRegister()) { |
| (...skipping 190 matching lines...) |
| 4395 DeferredStringCharFromCode* deferred = | 4432 DeferredStringCharFromCode* deferred = |
| 4396 new(zone()) DeferredStringCharFromCode(this, instr); | 4433 new(zone()) DeferredStringCharFromCode(this, instr); |
| 4397 | 4434 |
| 4398 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); | 4435 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); |
| 4399 Register char_code = ToRegister(instr->char_code()); | 4436 Register char_code = ToRegister(instr->char_code()); |
| 4400 Register result = ToRegister(instr->result()); | 4437 Register result = ToRegister(instr->result()); |
| 4401 ASSERT(!char_code.is(result)); | 4438 ASSERT(!char_code.is(result)); |
| 4402 | 4439 |
| 4403 __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode)); | 4440 __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode)); |
| 4404 __ j(above, deferred->entry()); | 4441 __ j(above, deferred->entry()); |
| 4405 __ movsxlq(char_code, char_code); | 4442 __k movsxlq(char_code, char_code); |
| 4406 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); | 4443 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); |
| 4407 __ movq(result, FieldOperand(result, | 4444 __ movq(result, FieldOperand(result, |
| 4408 char_code, times_pointer_size, | 4445 char_code, times_pointer_size, |
| 4409 FixedArray::kHeaderSize)); | 4446 FixedArray::kHeaderSize)); |
| 4410 __ CompareRoot(result, Heap::kUndefinedValueRootIndex); | 4447 __ CompareRoot(result, Heap::kUndefinedValueRootIndex); |
| 4411 __ j(equal, deferred->entry()); | 4448 __ j(equal, deferred->entry()); |
| 4412 __ bind(deferred->exit()); | 4449 __ bind(deferred->exit()); |
| 4413 } | 4450 } |
| 4414 | 4451 |
| 4415 | 4452 |
| (...skipping 51 matching lines...) |
| 4467 LOperand* output = instr->result(); | 4504 LOperand* output = instr->result(); |
| 4468 LOperand* temp = instr->temp(); | 4505 LOperand* temp = instr->temp(); |
| 4469 | 4506 |
| 4470 __ LoadUint32(ToDoubleRegister(output), | 4507 __ LoadUint32(ToDoubleRegister(output), |
| 4471 ToRegister(input), | 4508 ToRegister(input), |
| 4472 ToDoubleRegister(temp)); | 4509 ToDoubleRegister(temp)); |
| 4473 } | 4510 } |
| 4474 | 4511 |
| 4475 | 4512 |
| 4476 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { | 4513 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { |
| 4514 #ifndef V8_TARGET_ARCH_X32 |
| 4477 LOperand* input = instr->value(); | 4515 LOperand* input = instr->value(); |
| 4478 ASSERT(input->IsRegister() && input->Equals(instr->result())); | 4516 ASSERT(input->IsRegister() && input->Equals(instr->result())); |
| 4479 Register reg = ToRegister(input); | 4517 Register reg = ToRegister(input); |
| 4480 | 4518 |
| 4481 __ Integer32ToSmi(reg, reg); | 4519 __ Integer32ToSmi(reg, reg); |
| 4520 #else |
| 4521 class DeferredNumberTagI: public LDeferredCode { |
| 4522 public: |
| 4523 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) |
| 4524 : LDeferredCode(codegen), instr_(instr) { } |
| 4525 virtual void Generate() { |
| 4526 codegen()->DoDeferredNumberTagI(instr_); |
| 4527 } |
| 4528 virtual LInstruction* instr() { return instr_; } |
| 4529 private: |
| 4530 LNumberTagI* instr_; |
| 4531 }; |
| 4532 |
| 4533 LOperand* input = instr->value(); |
| 4534 ASSERT(input->IsRegister() && input->Equals(instr->result())); |
| 4535 Register reg = ToRegister(input); |
| 4536 |
| 4537 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr); |
| 4538 __ Integer32ToSmi(reg, reg); |
| 4539 __ j(overflow, deferred->entry()); |
| 4540 __ bind(deferred->exit()); |
| 4541 #endif |
| 4482 } | 4542 } |
| 4483 | 4543 |
| 4484 | 4544 |
| 4545 #ifdef V8_TARGET_ARCH_X32 |
| 4546 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { |
| 4547 Label slow; |
| 4548 Register reg = ToRegister(instr->value()); |
| 4549 Register tmp = reg.is(rax) ? kScratchRegister : rax; |
| 4550 |
| 4551 // Preserve the value of all registers. |
| 4552 PushSafepointRegistersScope scope(this); |
| 4553 |
| 4554 Label done; |
| 4555 // There was overflow, so bits 30 and 31 of the original integer |
| 4556 // disagree. Try to allocate a heap number in new space and store |
| 4557 // the value in there. If that fails, call the runtime system. |
| 4558 __ SmiToInteger32(reg, reg); |
| 4559 __ xorl(reg, Immediate(0x80000000)); |
| 4560 __ cvtlsi2sd(xmm0, reg); |
| 4561 |
| 4562 if (FLAG_inline_new) { |
| 4563 __ AllocateHeapNumber(reg, tmp, &slow); |
| 4564 __ jmp(&done, Label::kNear); |
| 4565 } |
| 4566 |
| 4567 // Slow case: Call the runtime system to do the number allocation. |
| 4568 __ bind(&slow); |
| 4569 |
| 4570 // Put a valid pointer value in the stack slot where the result |
| 4571 // register is stored: this register is in the pointer map, but it |
| 4572 // currently holds an integer value. |
| 4573 __ StoreToSafepointRegisterSlot(reg, Immediate(0)); |
| 4574 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); |
| 4575 // Set the pointer to the new heap number in reg. |
| 4576 if (!reg.is(rax)) __ movl(reg, rax); |
| 4577 |
| 4578 // Heap number allocated. Put the value in xmm0 into the value field |
| 4579 // of the allocated heap number. |
| 4580 __ bind(&done); |
| 4581 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm0); |
| 4582 __ StoreToSafepointRegisterSlot(reg, reg); |
| 4583 } |
| 4584 #endif |
| 4585 |
| 4586 |
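The new x32 path needs the deferred code because a smi there is the 31-bit value shifted left by one, so Integer32ToSmi overflows exactly when bits 30 and 31 of the original disagree; shifting back arithmetically and flipping bit 31 then recovers the original value, which is what the xorl above does before the cvtlsi2sd. A worked sketch of the bit trick:

    #include <cassert>
    #include <cstdint>

    // Recover the original int32 after tagging (tag == value << 1) wrapped.
    // Arithmetic right shift and two's-complement wrapping are assumed, as
    // on the compilers V8 targets.
    int32_t RecoverAfterSmiTagOverflow(int32_t tagged) {
      int32_t shifted_back = tagged >> 1;  // SmiToInteger32
      return static_cast<int32_t>(static_cast<uint32_t>(shifted_back) ^ 0x80000000u);
    }

    int main() {
      int32_t original = 0x40000000;  // bits 30/31 disagree: tagging overflows
      int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(original) << 1);
      assert(RecoverAfterSmiTagOverflow(tagged) == original);
      return 0;
    }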
| 4485 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { | 4587 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { |
| 4486 class DeferredNumberTagU: public LDeferredCode { | 4588 class DeferredNumberTagU: public LDeferredCode { |
| 4487 public: | 4589 public: |
| 4488 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) | 4590 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) |
| 4489 : LDeferredCode(codegen), instr_(instr) { } | 4591 : LDeferredCode(codegen), instr_(instr) { } |
| 4490 virtual void Generate() { | 4592 virtual void Generate() { |
| 4491 codegen()->DoDeferredNumberTagU(instr_); | 4593 codegen()->DoDeferredNumberTagU(instr_); |
| 4492 } | 4594 } |
| 4493 virtual LInstruction* instr() { return instr_; } | 4595 virtual LInstruction* instr() { return instr_; } |
| 4494 private: | 4596 private: |
| (...skipping 84 matching lines...) |
| 4579 Immediate(kHoleNanUpper32)); | 4681 Immediate(kHoleNanUpper32)); |
| 4580 Label canonicalize; | 4682 Label canonicalize; |
| 4581 __ j(not_equal, &canonicalize); | 4683 __ j(not_equal, &canonicalize); |
| 4582 __ addq(rsp, Immediate(kDoubleSize)); | 4684 __ addq(rsp, Immediate(kDoubleSize)); |
| 4583 __ Move(reg, factory()->the_hole_value()); | 4685 __ Move(reg, factory()->the_hole_value()); |
| 4584 __ jmp(&done); | 4686 __ jmp(&done); |
| 4585 __ bind(&canonicalize); | 4687 __ bind(&canonicalize); |
| 4586 __ addq(rsp, Immediate(kDoubleSize)); | 4688 __ addq(rsp, Immediate(kDoubleSize)); |
| 4587 __ Set(kScratchRegister, BitCast<uint64_t>( | 4689 __ Set(kScratchRegister, BitCast<uint64_t>( |
| 4588 FixedDoubleArray::canonical_not_the_hole_nan_as_double())); | 4690 FixedDoubleArray::canonical_not_the_hole_nan_as_double())); |
| 4589 __ movq(input_reg, kScratchRegister); | 4691 __k movq(input_reg, kScratchRegister); |
| 4590 } | 4692 } |
| 4591 | 4693 |
| 4592 __ bind(&no_special_nan_handling); | 4694 __ bind(&no_special_nan_handling); |
| 4593 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); | 4695 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); |
| 4594 if (FLAG_inline_new) { | 4696 if (FLAG_inline_new) { |
| 4595 __ AllocateHeapNumber(reg, tmp, deferred->entry()); | 4697 __ AllocateHeapNumber(reg, tmp, deferred->entry()); |
| 4596 } else { | 4698 } else { |
| 4597 __ jmp(deferred->entry()); | 4699 __ jmp(deferred->entry()); |
| 4598 } | 4700 } |
| 4599 __ bind(deferred->exit()); | 4701 __ bind(deferred->exit()); |
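The special-NaN block in DoNumberTagD distinguishes the hole marker from ordinary NaNs by comparing only the upper 32 bits of the double against kHoleNanUpper32: a hole NaN becomes the_hole_value, and any other NaN is rewritten to the canonical NaN before boxing. A rough model of the check (the constant is an assumed stand-in, not necessarily V8's exact bit pattern):

    #include <cstdint>
    #include <cstring>

    const uint32_t kHoleNanUpper32 = 0x7FF7FFFF;  // illustrative value only

    // Mirrors the cmpl against the high dword of the spilled double.
    bool IsHoleNan(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);  // safe type pun
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    }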
| (...skipping 80 matching lines...) |
| 4680 __ bind(&heap_number); | 4782 __ bind(&heap_number); |
| 4681 } | 4783 } |
| 4682 // Heap number to XMM conversion. | 4784 // Heap number to XMM conversion. |
| 4683 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 4785 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 4684 if (deoptimize_on_minus_zero) { | 4786 if (deoptimize_on_minus_zero) { |
| 4685 XMMRegister xmm_scratch = xmm0; | 4787 XMMRegister xmm_scratch = xmm0; |
| 4686 __ xorps(xmm_scratch, xmm_scratch); | 4788 __ xorps(xmm_scratch, xmm_scratch); |
| 4687 __ ucomisd(xmm_scratch, result_reg); | 4789 __ ucomisd(xmm_scratch, result_reg); |
| 4688 __ j(not_equal, &done, Label::kNear); | 4790 __ j(not_equal, &done, Label::kNear); |
| 4689 __ movmskpd(kScratchRegister, result_reg); | 4791 __ movmskpd(kScratchRegister, result_reg); |
| 4690 __ testq(kScratchRegister, Immediate(1)); | 4792 __k testq(kScratchRegister, Immediate(1)); |
| 4691 DeoptimizeIf(not_zero, env); | 4793 DeoptimizeIf(not_zero, env); |
| 4692 } | 4794 } |
| 4693 __ jmp(&done, Label::kNear); | 4795 __ jmp(&done, Label::kNear); |
| 4694 } else { | 4796 } else { |
| 4695 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); | 4797 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); |
| 4696 } | 4798 } |
| 4697 | 4799 |
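The deoptimize_on_minus_zero sequence above exists because ucomisd reports -0.0 and +0.0 as equal; movmskpd copies the sign bits of the XMM lanes into the low bits of a general register, so testing bit 0 is what actually separates the two. Modeled in portable C++:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // value == 0.0 plays the role of ucomisd; the sign bit plays the role
    // of movmskpd + testq(kScratchRegister, Immediate(1)).
    bool IsMinusZero(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      return value == 0.0 && (bits >> 63) != 0;
    }

    int main() {
      assert(IsMinusZero(-0.0));
      assert(!IsMinusZero(0.0) && !IsMinusZero(1.0));
      return 0;
    }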
| 4698 // Smi to XMM conversion | 4800 // Smi to XMM conversion |
| 4699 __ bind(&load_smi); | 4801 __ bind(&load_smi); |
| 4700 __ SmiToInteger32(kScratchRegister, input_reg); | 4802 __ SmiToInteger32(kScratchRegister, input_reg); |
| (...skipping 17 matching lines...) |
| 4718 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); | 4820 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); |
| 4719 DeoptimizeIf(not_equal, instr->environment()); | 4821 DeoptimizeIf(not_equal, instr->environment()); |
| 4720 __ Set(input_reg, 0); | 4822 __ Set(input_reg, 0); |
| 4721 __ jmp(&done, Label::kNear); | 4823 __ jmp(&done, Label::kNear); |
| 4722 | 4824 |
| 4723 __ bind(&heap_number); | 4825 __ bind(&heap_number); |
| 4724 | 4826 |
| 4725 __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 4827 __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 4726 __ cvttsd2siq(input_reg, xmm0); | 4828 __ cvttsd2siq(input_reg, xmm0); |
| 4727 __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000)); | 4829 __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000)); |
| 4728 __ cmpq(input_reg, kScratchRegister); | 4830 __k cmpq(input_reg, kScratchRegister); |
| 4729 DeoptimizeIf(equal, instr->environment()); | 4831 DeoptimizeIf(equal, instr->environment()); |
| 4730 } else { | 4832 } else { |
| 4731 // Deoptimize if we don't have a heap number. | 4833 // Deoptimize if we don't have a heap number. |
| 4732 DeoptimizeIf(not_equal, instr->environment()); | 4834 DeoptimizeIf(not_equal, instr->environment()); |
| 4733 | 4835 |
| 4734 XMMRegister xmm_temp = ToDoubleRegister(instr->temp()); | 4836 XMMRegister xmm_temp = ToDoubleRegister(instr->temp()); |
| 4735 __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 4837 __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 4736 __ cvttsd2si(input_reg, xmm0); | 4838 __ cvttsd2si(input_reg, xmm0); |
| 4737 __ cvtlsi2sd(xmm_temp, input_reg); | 4839 __ cvtlsi2sd(xmm_temp, input_reg); |
| 4738 __ ucomisd(xmm0, xmm_temp); | 4840 __ ucomisd(xmm0, xmm_temp); |
| (...skipping 68 matching lines...) |
| 4807 LOperand* result = instr->result(); | 4909 LOperand* result = instr->result(); |
| 4808 ASSERT(result->IsRegister()); | 4910 ASSERT(result->IsRegister()); |
| 4809 | 4911 |
| 4810 XMMRegister input_reg = ToDoubleRegister(input); | 4912 XMMRegister input_reg = ToDoubleRegister(input); |
| 4811 Register result_reg = ToRegister(result); | 4913 Register result_reg = ToRegister(result); |
| 4812 | 4914 |
| 4813 if (instr->truncating()) { | 4915 if (instr->truncating()) { |
| 4814 // Performs a truncating conversion of a floating point number as used by | 4916 // Performs a truncating conversion of a floating point number as used by |
| 4815 // the JS bitwise operations. | 4917 // the JS bitwise operations. |
| 4816 __ cvttsd2siq(result_reg, input_reg); | 4918 __ cvttsd2siq(result_reg, input_reg); |
| 4817 __ movq(kScratchRegister, | 4919 __k movq(kScratchRegister, |
| 4818 V8_INT64_C(0x8000000000000000), | 4920 V8_INT64_C(0x8000000000000000), |
| 4819 RelocInfo::NONE64); | 4921 RelocInfo::NONE64); |
| 4820 __ cmpq(result_reg, kScratchRegister); | 4922 __k cmpq(result_reg, kScratchRegister); |
| 4821 DeoptimizeIf(equal, instr->environment()); | 4923 DeoptimizeIf(equal, instr->environment()); |
| 4822 } else { | 4924 } else { |
| 4823 __ cvttsd2si(result_reg, input_reg); | 4925 __ cvttsd2si(result_reg, input_reg); |
| 4824 __ cvtlsi2sd(xmm0, result_reg); | 4926 __ cvtlsi2sd(xmm0, result_reg); |
| 4825 __ ucomisd(xmm0, input_reg); | 4927 __ ucomisd(xmm0, input_reg); |
| 4826 DeoptimizeIf(not_equal, instr->environment()); | 4928 DeoptimizeIf(not_equal, instr->environment()); |
| 4827 DeoptimizeIf(parity_even, instr->environment()); // NaN. | 4929 DeoptimizeIf(parity_even, instr->environment()); // NaN. |
| 4828 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4930 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 4829 Label done; | 4931 Label done; |
| 4830 // The integer converted back is equal to the original. We | 4932 // The integer converted back is equal to the original. We |
| (...skipping 810 matching lines...) |
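In the truncating branch of DoDoubleToI above, cvttsd2siq produces the sentinel 0x8000000000000000 for NaN and out-of-range inputs, so one compare against that constant detects every failure; the non-truncating branch instead converts, converts back, and deoptimizes unless the round trip is exact (the kBailoutOnMinusZero check visible just above is omitted from the sketch). A portable model of the non-truncating check:

    #include <cmath>
    #include <cstdint>
    #include <optional>

    // The hardware reports NaN and inexactness via flags; this model
    // checks them explicitly instead.
    std::optional<int32_t> NonTruncatingToI(double value) {
      if (std::isnan(value) || value < INT32_MIN || value > INT32_MAX)
        return std::nullopt;                         // would DeoptimizeIf(...)
      int32_t result = static_cast<int32_t>(value);  // cvttsd2si
      if (static_cast<double>(result) != value)
        return std::nullopt;                         // cvtlsi2sd + ucomisd mismatch
      return result;
    }
    // NonTruncatingToI(3.0) yields 3; NonTruncatingToI(3.5) deopts (nullopt).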
| 5641 __ negl(index); | 5743 __ negl(index); |
| 5642 // Index is now equal to the out-of-object property index plus 1. | 5744 // Index is now equal to the out-of-object property index plus 1. |
| 5643 __ movq(object, FieldOperand(object, | 5745 __ movq(object, FieldOperand(object, |
| 5644 index, | 5746 index, |
| 5645 times_pointer_size, | 5747 times_pointer_size, |
| 5646 FixedArray::kHeaderSize - kPointerSize)); | 5748 FixedArray::kHeaderSize - kPointerSize)); |
| 5647 __ bind(&done); | 5749 __ bind(&done); |
| 5648 } | 5750 } |
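The negl/FieldOperand pair above works because out-of-object property indices arrive negated: after negl the register holds the property index plus one, and using FixedArray::kHeaderSize - kPointerSize as the displacement cancels the extra slot. A small check of the address arithmetic, with illustrative constants:

    #include <cassert>
    #include <cstdint>

    const int kPointerSize = 8;            // assumed x64 values, illustration only
    const int kFixedArrayHeaderSize = 16;

    // Offset of the property slot inside the properties FixedArray.
    int64_t OutOfObjectSlotOffset(int32_t negated_index) {
      int32_t index = -negated_index;      // negl(index): property index + 1
      return int64_t{index} * kPointerSize +
             (kFixedArrayHeaderSize - kPointerSize);
    }

    int main() {
      // The first out-of-object property sits right after the header.
      assert(OutOfObjectSlotOffset(-1) == kFixedArrayHeaderSize);
      assert(OutOfObjectSlotOffset(-2) == kFixedArrayHeaderSize + kPointerSize);
      return 0;
    }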
| 5649 | 5751 |
| 5650 | 5752 |
| 5753 #undef __n |
| 5754 #undef __q |
| 5755 #undef __k |
| 5651 #undef __ | 5756 #undef __ |
| 5652 | 5757 |
| 5653 } } // namespace v8::internal | 5758 } } // namespace v8::internal |
| 5654 | 5759 |
| 5655 #endif // V8_TARGET_ARCH_X64 | 5760 #endif // V8_TARGET_ARCH_X64 |