| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 1093 matching lines...) |
| 1104 | 1104 |
| 1105 | 1105 |
| 1106 void LCodeGen::DoConstantD(LConstantD* instr) { | 1106 void LCodeGen::DoConstantD(LConstantD* instr) { |
| 1107 ASSERT(instr->result()->IsDoubleRegister()); | 1107 ASSERT(instr->result()->IsDoubleRegister()); |
| 1108 XMMRegister res = ToDoubleRegister(instr->result()); | 1108 XMMRegister res = ToDoubleRegister(instr->result()); |
| 1109 double v = instr->value(); | 1109 double v = instr->value(); |
| 1110 uint64_t int_val = BitCast<uint64_t, double>(v); | 1110 uint64_t int_val = BitCast<uint64_t, double>(v); |
| 1111 // Use xor to produce +0.0 in a fast and compact way, but avoid doing | 1111 // Use xor to produce +0.0 in a fast and compact way, but avoid doing |
| 1112 // so if the constant is -0.0. | 1112 // so if the constant is -0.0. |
| 1113 if (int_val == 0) { | 1113 if (int_val == 0) { |
| 1114 __ xorpd(res, res); | 1114 __ xorps(res, res); |
| 1115 } else { | 1115 } else { |
| 1116 Register tmp = ToRegister(instr->TempAt(0)); | 1116 Register tmp = ToRegister(instr->TempAt(0)); |
| 1117 __ Set(tmp, int_val); | 1117 __ Set(tmp, int_val); |
| 1118 __ movq(res, tmp); | 1118 __ movq(res, tmp); |
| 1119 } | 1119 } |
| 1120 } | 1120 } |
| 1121 | 1121 |
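Note on the xorpd -> xorps change in DoConstantD above: xor'ing an XMM register with itself zeroes it either way, but xorps has no 0x66 operand-size prefix and is one byte shorter, which is presumably the motivation for preferring it even when the register will hold a double. The int_val == 0 guard exists because only +0.0 has an all-zero bit pattern; -0.0 carries the sign bit and has to go through the movq path. A minimal standalone sketch of that bit-level fact (illustrative only, not part of the patch):

    #include <cstdint>
    #include <cstring>

    // +0.0 is the only double whose 64-bit pattern is all zeros, so it is the
    // only constant the self-xor shortcut can materialize.
    static uint64_t DoubleBits(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof(bits));
      return bits;
    }
    // DoubleBits(0.0)  == 0x0000000000000000  -> eligible for xorps res, res
    // DoubleBits(-0.0) == 0x8000000000000000  -> sign bit set, needs the movq path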
| 1122 | 1122 |
| 1123 void LCodeGen::DoConstantT(LConstantT* instr) { | 1123 void LCodeGen::DoConstantT(LConstantT* instr) { |
| 1124 ASSERT(instr->result()->IsRegister()); | 1124 ASSERT(instr->result()->IsRegister()); |
| (...skipping 91 matching lines...) |
| 1216 __ subsd(left, right); | 1216 __ subsd(left, right); |
| 1217 break; | 1217 break; |
| 1218 case Token::MUL: | 1218 case Token::MUL: |
| 1219 __ mulsd(left, right); | 1219 __ mulsd(left, right); |
| 1220 break; | 1220 break; |
| 1221 case Token::DIV: | 1221 case Token::DIV: |
| 1222 __ divsd(left, right); | 1222 __ divsd(left, right); |
| 1223 break; | 1223 break; |
| 1224 case Token::MOD: | 1224 case Token::MOD: |
| 1225 __ PrepareCallCFunction(2); | 1225 __ PrepareCallCFunction(2); |
| 1226 __ movsd(xmm0, left); | 1226 __ movaps(xmm0, left); |
| 1227 ASSERT(right.is(xmm1)); | 1227 ASSERT(right.is(xmm1)); |
| 1228 __ CallCFunction( | 1228 __ CallCFunction( |
| 1229 ExternalReference::double_fp_operation(Token::MOD, isolate()), 2); | 1229 ExternalReference::double_fp_operation(Token::MOD, isolate()), 2); |
| 1230 __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); | 1230 __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); |
| 1231 __ movsd(result, xmm0); | 1231 __ movaps(result, xmm0); |
| 1232 break; | 1232 break; |
| 1233 default: | 1233 default: |
| 1234 UNREACHABLE(); | 1234 UNREACHABLE(); |
| 1235 break; | 1235 break; |
| 1236 } | 1236 } |
| 1237 } | 1237 } |
| 1238 | 1238 |
| 1239 | 1239 |
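Note on the movsd -> movaps changes in the Token::MOD path above (and in DoPower further down): as a register-to-register copy, movsd writes only the low 64 bits and leaves the destination's upper half untouched, so it carries a dependency on the destination's previous contents; movaps copies all 128 bits, has no such dependency, and its encoding (0F 28) is one byte shorter than movsd's (F2 0F 10). Only the low double matters for these moves, so the two are interchangeable in effect and movaps is the cheaper choice. A small intrinsics sketch of the semantic difference (illustrative only):

    #include <emmintrin.h>

    // movsd reg,reg behaviour: merge src's low double into dst, keep dst's upper half.
    static __m128d CopyLowMerge(__m128d dst, __m128d src) { return _mm_move_sd(dst, src); }
    // movaps-style behaviour: a full 128-bit copy with no dependency on the old dst.
    static __m128d CopyFull(__m128d src) { return src; }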
| 1240 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { | 1240 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { |
| 1241 ASSERT(ToRegister(instr->InputAt(0)).is(rdx)); | 1241 ASSERT(ToRegister(instr->InputAt(0)).is(rdx)); |
| (...skipping 38 matching lines...) |
| 1280 int true_block = chunk_->LookupDestination(instr->true_block_id()); | 1280 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 1281 int false_block = chunk_->LookupDestination(instr->false_block_id()); | 1281 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 1282 | 1282 |
| 1283 Representation r = instr->hydrogen()->representation(); | 1283 Representation r = instr->hydrogen()->representation(); |
| 1284 if (r.IsInteger32()) { | 1284 if (r.IsInteger32()) { |
| 1285 Register reg = ToRegister(instr->InputAt(0)); | 1285 Register reg = ToRegister(instr->InputAt(0)); |
| 1286 __ testl(reg, reg); | 1286 __ testl(reg, reg); |
| 1287 EmitBranch(true_block, false_block, not_zero); | 1287 EmitBranch(true_block, false_block, not_zero); |
| 1288 } else if (r.IsDouble()) { | 1288 } else if (r.IsDouble()) { |
| 1289 XMMRegister reg = ToDoubleRegister(instr->InputAt(0)); | 1289 XMMRegister reg = ToDoubleRegister(instr->InputAt(0)); |
| 1290 __ xorpd(xmm0, xmm0); | 1290 __ xorps(xmm0, xmm0); |
| 1291 __ ucomisd(reg, xmm0); | 1291 __ ucomisd(reg, xmm0); |
| 1292 EmitBranch(true_block, false_block, not_equal); | 1292 EmitBranch(true_block, false_block, not_equal); |
| 1293 } else { | 1293 } else { |
| 1294 ASSERT(r.IsTagged()); | 1294 ASSERT(r.IsTagged()); |
| 1295 Register reg = ToRegister(instr->InputAt(0)); | 1295 Register reg = ToRegister(instr->InputAt(0)); |
| 1296 HType type = instr->hydrogen()->type(); | 1296 HType type = instr->hydrogen()->type(); |
| 1297 if (type.IsBoolean()) { | 1297 if (type.IsBoolean()) { |
| 1298 __ CompareRoot(reg, Heap::kTrueValueRootIndex); | 1298 __ CompareRoot(reg, Heap::kTrueValueRootIndex); |
| 1299 EmitBranch(true_block, false_block, equal); | 1299 EmitBranch(true_block, false_block, equal); |
| 1300 } else if (type.IsSmi()) { | 1300 } else if (type.IsSmi()) { |
| (...skipping 14 matching lines...) |
| 1315 __ JumpIfSmi(reg, true_label); | 1315 __ JumpIfSmi(reg, true_label); |
| 1316 | 1316 |
| 1317 // Test for double values. Plus/minus zero and NaN are false. | 1317 // Test for double values. Plus/minus zero and NaN are false. |
| 1318 NearLabel call_stub; | 1318 NearLabel call_stub; |
| 1319 __ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset), | 1319 __ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset), |
| 1320 Heap::kHeapNumberMapRootIndex); | 1320 Heap::kHeapNumberMapRootIndex); |
| 1321 __ j(not_equal, &call_stub); | 1321 __ j(not_equal, &call_stub); |
| 1322 | 1322 |
| 1323 // HeapNumber => false iff +0, -0, or NaN. These three cases set the | 1323 // HeapNumber => false iff +0, -0, or NaN. These three cases set the |
| 1324 // zero flag when compared to zero using ucomisd. | 1324 // zero flag when compared to zero using ucomisd. |
| 1325 __ xorpd(xmm0, xmm0); | 1325 __ xorps(xmm0, xmm0); |
| 1326 __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset)); | 1326 __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset)); |
| 1327 __ j(zero, false_label); | 1327 __ j(zero, false_label); |
| 1328 __ jmp(true_label); | 1328 __ jmp(true_label); |
| 1329 | 1329 |
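Note on the ucomisd checks in the truthiness branch above: ucomisd sets the zero flag both when the operands compare equal and when the comparison is unordered (a NaN operand). Compared against +0.0, ZF is therefore set exactly for +0, -0 and NaN, which are the falsy number values, so one flag test covers all three cases; the double-representation branch earlier in the function relies on the same property with not_equal. A scalar sketch of the predicate being computed (illustrative only):

    // true means "take the true block"; false for +0, -0 and NaN.
    static bool DoubleToBoolean(double value) {
      return value == value && value != 0.0;  // NaN fails the self-compare, +/-0 fails the != 0
    }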
| 1330 // The conversion stub doesn't cause garbage collections so it's | 1330 // The conversion stub doesn't cause garbage collections so it's |
| 1331 // safe to not record a safepoint after the call. | 1331 // safe to not record a safepoint after the call. |
| 1332 __ bind(&call_stub); | 1332 __ bind(&call_stub); |
| 1333 ToBooleanStub stub; | 1333 ToBooleanStub stub; |
| 1334 __ Pushad(); | 1334 __ Pushad(); |
| 1335 __ push(reg); | 1335 __ push(reg); |
| (...skipping 1328 matching lines...) |
| 2664 private: | 2664 private: |
| 2665 LUnaryMathOperation* instr_; | 2665 LUnaryMathOperation* instr_; |
| 2666 }; | 2666 }; |
| 2667 | 2667 |
| 2668 ASSERT(instr->InputAt(0)->Equals(instr->result())); | 2668 ASSERT(instr->InputAt(0)->Equals(instr->result())); |
| 2669 Representation r = instr->hydrogen()->value()->representation(); | 2669 Representation r = instr->hydrogen()->value()->representation(); |
| 2670 | 2670 |
| 2671 if (r.IsDouble()) { | 2671 if (r.IsDouble()) { |
| 2672 XMMRegister scratch = xmm0; | 2672 XMMRegister scratch = xmm0; |
| 2673 XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); | 2673 XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); |
| 2674 __ xorpd(scratch, scratch); | 2674 __ xorps(scratch, scratch); |
| 2675 __ subsd(scratch, input_reg); | 2675 __ subsd(scratch, input_reg); |
| 2676 __ andpd(input_reg, scratch); | 2676 __ andpd(input_reg, scratch); |
| 2677 } else if (r.IsInteger32()) { | 2677 } else if (r.IsInteger32()) { |
| 2678 EmitIntegerMathAbs(instr); | 2678 EmitIntegerMathAbs(instr); |
| 2679 } else { // Tagged case. | 2679 } else { // Tagged case. |
| 2680 DeferredMathAbsTaggedHeapNumber* deferred = | 2680 DeferredMathAbsTaggedHeapNumber* deferred = |
| 2681 new DeferredMathAbsTaggedHeapNumber(this, instr); | 2681 new DeferredMathAbsTaggedHeapNumber(this, instr); |
| 2682 Register input_reg = ToRegister(instr->InputAt(0)); | 2682 Register input_reg = ToRegister(instr->InputAt(0)); |
| 2683 // Smi check. | 2683 // Smi check. |
| 2684 __ JumpIfNotSmi(input_reg, deferred->entry()); | 2684 __ JumpIfNotSmi(input_reg, deferred->entry()); |
| (...skipping 16 matching lines...) |
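Note on the Math.abs double path above: the xorps/subsd/andpd sequence leaves 0.0 - x in the scratch register and then ANDs it with x. For finite non-zero x that difference is exactly -x, so the two values disagree only in the sign bit and the AND clears it, producing |x|; -0.0 comes out as +0.0 and a NaN input stays a NaN. The same trick in scalar form (illustrative only):

    #include <cstdint>
    #include <cstring>

    static double AbsViaSignBit(double x) {
      double neg = 0.0 - x;             // what `subsd scratch, input` leaves in scratch
      uint64_t xb, nb;
      std::memcpy(&xb, &x, sizeof(xb));
      std::memcpy(&nb, &neg, sizeof(nb));
      uint64_t bits = xb & nb;          // what `andpd input, scratch` leaves in input
      double result;
      std::memcpy(&result, &bits, sizeof(result));
      return result;                    // |x| for finite x, +0 for +/-0, NaN stays NaN
    }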
| 2701 // Deoptimize if minus zero. | 2701 // Deoptimize if minus zero. |
| 2702 __ movq(output_reg, input_reg); | 2702 __ movq(output_reg, input_reg); |
| 2703 __ subq(output_reg, Immediate(1)); | 2703 __ subq(output_reg, Immediate(1)); |
| 2704 DeoptimizeIf(overflow, instr->environment()); | 2704 DeoptimizeIf(overflow, instr->environment()); |
| 2705 } | 2705 } |
| 2706 __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown); | 2706 __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown); |
| 2707 __ cvttsd2si(output_reg, xmm_scratch); | 2707 __ cvttsd2si(output_reg, xmm_scratch); |
| 2708 __ cmpl(output_reg, Immediate(0x80000000)); | 2708 __ cmpl(output_reg, Immediate(0x80000000)); |
| 2709 DeoptimizeIf(equal, instr->environment()); | 2709 DeoptimizeIf(equal, instr->environment()); |
| 2710 } else { | 2710 } else { |
| 2711 __ xorpd(xmm_scratch, xmm_scratch); // Zero the register. | 2711 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. |
| 2712 __ ucomisd(input_reg, xmm_scratch); | 2712 __ ucomisd(input_reg, xmm_scratch); |
| 2713 | 2713 |
| 2714 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 2714 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 2715 DeoptimizeIf(below_equal, instr->environment()); | 2715 DeoptimizeIf(below_equal, instr->environment()); |
| 2716 } else { | 2716 } else { |
| 2717 DeoptimizeIf(below, instr->environment()); | 2717 DeoptimizeIf(below, instr->environment()); |
| 2718 } | 2718 } |
| 2719 | 2719 |
| 2720 // Use truncating instruction (OK because input is positive). | 2720 // Use truncating instruction (OK because input is positive). |
| 2721 __ cvttsd2si(output_reg, input_reg); | 2721 __ cvttsd2si(output_reg, input_reg); |
| (...skipping 55 matching lines...) |
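Note on the cmpl(output_reg, Immediate(0x80000000)) check in the Math.floor path above (the roundsd/cvttsd2si sequence): when the truncated value does not fit in a signed 32-bit integer, or the input is NaN, cvttsd2si writes the "integer indefinite" value 0x80000000, so comparing against that constant is how the code detects a failed conversion and deoptimizes. The same hardware behaviour through the SSE2 intrinsic (illustrative only, not part of the patch):

    #include <emmintrin.h>
    #include <cstdint>
    #include <limits>

    // cvttsd2si returns INT32_MIN (0x80000000) for out-of-range or NaN input.
    const int32_t out_of_range = _mm_cvttsd_si32(_mm_set_sd(1e20));
    const int32_t from_nan =
        _mm_cvttsd_si32(_mm_set_sd(std::numeric_limits<double>::quiet_NaN()));
    // out_of_range == from_nan == INT32_MIN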
| 2777 XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); | 2777 XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); |
| 2778 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); | 2778 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); |
| 2779 __ sqrtsd(input_reg, input_reg); | 2779 __ sqrtsd(input_reg, input_reg); |
| 2780 } | 2780 } |
| 2781 | 2781 |
| 2782 | 2782 |
| 2783 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { | 2783 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { |
| 2784 XMMRegister xmm_scratch = xmm0; | 2784 XMMRegister xmm_scratch = xmm0; |
| 2785 XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); | 2785 XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); |
| 2786 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); | 2786 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); |
| 2787 __ xorpd(xmm_scratch, xmm_scratch); | 2787 __ xorps(xmm_scratch, xmm_scratch); |
| 2788 __ addsd(input_reg, xmm_scratch); // Convert -0 to +0. | 2788 __ addsd(input_reg, xmm_scratch); // Convert -0 to +0. |
| 2789 __ sqrtsd(input_reg, input_reg); | 2789 __ sqrtsd(input_reg, input_reg); |
| 2790 } | 2790 } |
| 2791 | 2791 |
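Note on the addsd in DoMathPowHalf above: IEEE-754 defines (-0.0) + (+0.0) as +0.0, while sqrt(-0.0) is -0.0, and JavaScript requires Math.pow(-0, 0.5) to be +0; adding the zeroed scratch register first therefore normalizes the sign before the square root. A tiny scalar check (illustrative only):

    #include <cmath>
    #include <cstdio>

    int main() {
      std::printf("%g\n", std::sqrt(-0.0));        // -0: sqrt preserves the sign of zero
      std::printf("%g\n", std::sqrt(-0.0 + 0.0));  // 0: the add turns -0 into +0 first
      return 0;
    }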
| 2792 | 2792 |
| 2793 void LCodeGen::DoPower(LPower* instr) { | 2793 void LCodeGen::DoPower(LPower* instr) { |
| 2794 LOperand* left = instr->InputAt(0); | 2794 LOperand* left = instr->InputAt(0); |
| 2795 XMMRegister left_reg = ToDoubleRegister(left); | 2795 XMMRegister left_reg = ToDoubleRegister(left); |
| 2796 ASSERT(!left_reg.is(xmm1)); | 2796 ASSERT(!left_reg.is(xmm1)); |
| 2797 LOperand* right = instr->InputAt(1); | 2797 LOperand* right = instr->InputAt(1); |
| 2798 XMMRegister result_reg = ToDoubleRegister(instr->result()); | 2798 XMMRegister result_reg = ToDoubleRegister(instr->result()); |
| 2799 Representation exponent_type = instr->hydrogen()->right()->representation(); | 2799 Representation exponent_type = instr->hydrogen()->right()->representation(); |
| 2800 if (exponent_type.IsDouble()) { | 2800 if (exponent_type.IsDouble()) { |
| 2801 __ PrepareCallCFunction(2); | 2801 __ PrepareCallCFunction(2); |
| 2802 // Move arguments to correct registers | 2802 // Move arguments to correct registers |
| 2803 __ movsd(xmm0, left_reg); | 2803 __ movaps(xmm0, left_reg); |
| 2804 ASSERT(ToDoubleRegister(right).is(xmm1)); | 2804 ASSERT(ToDoubleRegister(right).is(xmm1)); |
| 2805 __ CallCFunction( | 2805 __ CallCFunction( |
| 2806 ExternalReference::power_double_double_function(isolate()), 2); | 2806 ExternalReference::power_double_double_function(isolate()), 2); |
| 2807 } else if (exponent_type.IsInteger32()) { | 2807 } else if (exponent_type.IsInteger32()) { |
| 2808 __ PrepareCallCFunction(2); | 2808 __ PrepareCallCFunction(2); |
| 2809 // Move arguments to correct registers: xmm0 and edi (not rdi). | 2809 // Move arguments to correct registers: xmm0 and edi (not rdi). |
| 2810 // On Windows, the registers are xmm0 and edx. | 2810 // On Windows, the registers are xmm0 and edx. |
| 2811 __ movsd(xmm0, left_reg); | 2811 __ movaps(xmm0, left_reg); |
| 2812 #ifdef _WIN64 | 2812 #ifdef _WIN64 |
| 2813 ASSERT(ToRegister(right).is(rdx)); | 2813 ASSERT(ToRegister(right).is(rdx)); |
| 2814 #else | 2814 #else |
| 2815 ASSERT(ToRegister(right).is(rdi)); | 2815 ASSERT(ToRegister(right).is(rdi)); |
| 2816 #endif | 2816 #endif |
| 2817 __ CallCFunction( | 2817 __ CallCFunction( |
| 2818 ExternalReference::power_double_int_function(isolate()), 2); | 2818 ExternalReference::power_double_int_function(isolate()), 2); |
| 2819 } else { | 2819 } else { |
| 2820 ASSERT(exponent_type.IsTagged()); | 2820 ASSERT(exponent_type.IsTagged()); |
| 2821 Register right_reg = ToRegister(right); | 2821 Register right_reg = ToRegister(right); |
| 2822 | 2822 |
| 2823 Label non_smi, call; | 2823 Label non_smi, call; |
| 2824 __ JumpIfNotSmi(right_reg, &non_smi); | 2824 __ JumpIfNotSmi(right_reg, &non_smi); |
| 2825 __ SmiToInteger32(right_reg, right_reg); | 2825 __ SmiToInteger32(right_reg, right_reg); |
| 2826 __ cvtlsi2sd(xmm1, right_reg); | 2826 __ cvtlsi2sd(xmm1, right_reg); |
| 2827 __ jmp(&call); | 2827 __ jmp(&call); |
| 2828 | 2828 |
| 2829 __ bind(&non_smi); | 2829 __ bind(&non_smi); |
| 2830 __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE, kScratchRegister); | 2830 __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE, kScratchRegister); |
| 2831 DeoptimizeIf(not_equal, instr->environment()); | 2831 DeoptimizeIf(not_equal, instr->environment()); |
| 2832 __ movsd(xmm1, FieldOperand(right_reg, HeapNumber::kValueOffset)); | 2832 __ movsd(xmm1, FieldOperand(right_reg, HeapNumber::kValueOffset)); |
| 2833 | 2833 |
| 2834 __ bind(&call); | 2834 __ bind(&call); |
| 2835 __ PrepareCallCFunction(2); | 2835 __ PrepareCallCFunction(2); |
| 2836 // Move arguments to correct registers xmm0 and xmm1. | 2836 // Move arguments to correct registers xmm0 and xmm1. |
| 2837 __ movsd(xmm0, left_reg); | 2837 __ movaps(xmm0, left_reg); |
| 2838 // Right argument is already in xmm1. | 2838 // Right argument is already in xmm1. |
| 2839 __ CallCFunction( | 2839 __ CallCFunction( |
| 2840 ExternalReference::power_double_double_function(isolate()), 2); | 2840 ExternalReference::power_double_double_function(isolate()), 2); |
| 2841 } | 2841 } |
| 2842 // Return value is in xmm0. | 2842 // Return value is in xmm0. |
| 2843 __ movsd(result_reg, xmm0); | 2843 __ movaps(result_reg, xmm0); |
| 2844 // Restore context register. | 2844 // Restore context register. |
| 2845 __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); | 2845 __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); |
| 2846 } | 2846 } |
| 2847 | 2847 |
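Note on the argument moves in DoPower above: the double operand travels in xmm0 on both ABIs, but the integer exponent for power_double_int_function (presumably a helper taking a double and an int) is passed in rdi under the System V AMD64 convention, of which only the low 32 bits, edi, are significant, whereas the Win64 convention assigns parameters positionally and places the second, integer argument in rdx. The final movaps(result_reg, xmm0) copies the helper's return value, again preferring the full-width register move.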
| 2848 | 2848 |
| 2849 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { | 2849 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { |
| 2850 ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); | 2850 ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); |
| 2851 TranscendentalCacheStub stub(TranscendentalCache::LOG, | 2851 TranscendentalCacheStub stub(TranscendentalCache::LOG, |
| 2852 TranscendentalCacheStub::UNTAGGED); | 2852 TranscendentalCacheStub::UNTAGGED); |
| 2853 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 2853 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| (...skipping 561 matching lines...) |
| 3415 | 3415 |
| 3416 // Heap number map check. | 3416 // Heap number map check. |
| 3417 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), | 3417 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), |
| 3418 Heap::kHeapNumberMapRootIndex); | 3418 Heap::kHeapNumberMapRootIndex); |
| 3419 __ j(equal, &heap_number); | 3419 __ j(equal, &heap_number); |
| 3420 | 3420 |
| 3421 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); | 3421 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); |
| 3422 DeoptimizeIf(not_equal, env); | 3422 DeoptimizeIf(not_equal, env); |
| 3423 | 3423 |
| 3424 // Convert undefined to NaN. Compute NaN as 0/0. | 3424 // Convert undefined to NaN. Compute NaN as 0/0. |
| 3425 __ xorpd(result_reg, result_reg); | 3425 __ xorps(result_reg, result_reg); |
| 3426 __ divsd(result_reg, result_reg); | 3426 __ divsd(result_reg, result_reg); |
| 3427 __ jmp(&done); | 3427 __ jmp(&done); |
| 3428 | 3428 |
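Note on the xorps/divsd pair above: dividing a freshly zeroed register by itself computes 0.0/0.0, which IEEE-754 defines as a quiet NaN, so this materializes NaN for the undefined case without loading a constant from memory.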
| 3429 // Heap number to XMM conversion. | 3429 // Heap number to XMM conversion. |
| 3430 __ bind(&heap_number); | 3430 __ bind(&heap_number); |
| 3431 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 3431 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 3432 __ jmp(&done); | 3432 __ jmp(&done); |
| 3433 | 3433 |
| 3434 // Smi to XMM conversion | 3434 // Smi to XMM conversion |
| 3435 __ bind(&load_smi); | 3435 __ bind(&load_smi); |
| (...skipping 580 matching lines...) |
| 4016 RegisterEnvironmentForDeoptimization(environment); | 4016 RegisterEnvironmentForDeoptimization(environment); |
| 4017 ASSERT(osr_pc_offset_ == -1); | 4017 ASSERT(osr_pc_offset_ == -1); |
| 4018 osr_pc_offset_ = masm()->pc_offset(); | 4018 osr_pc_offset_ = masm()->pc_offset(); |
| 4019 } | 4019 } |
| 4020 | 4020 |
| 4021 #undef __ | 4021 #undef __ |
| 4022 | 4022 |
| 4023 } } // namespace v8::internal | 4023 } } // namespace v8::internal |
| 4024 | 4024 |
| 4025 #endif // V8_TARGET_ARCH_X64 | 4025 #endif // V8_TARGET_ARCH_X64 |