OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 290 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
301 | 301 |
302 const char* GetName() { return "ConvertToDoubleStub"; } | 302 const char* GetName() { return "ConvertToDoubleStub"; } |
303 | 303 |
304 #ifdef DEBUG | 304 #ifdef DEBUG |
305 void Print() { PrintF("ConvertToDoubleStub\n"); } | 305 void Print() { PrintF("ConvertToDoubleStub\n"); } |
306 #endif | 306 #endif |
307 }; | 307 }; |
308 | 308 |
309 | 309 |
310 void ConvertToDoubleStub::Generate(MacroAssembler* masm) { | 310 void ConvertToDoubleStub::Generate(MacroAssembler* masm) { |
311 Register exponent = result2_; | 311 Register exponent = result1_; |
312 Register mantissa = result1_; | 312 Register mantissa = result2_; |
313 | 313 |
314 Label not_special; | 314 Label not_special; |
315 // Convert from Smi to integer. | 315 // Convert from Smi to integer. |
316 __ mov(source_, Operand(source_, ASR, kSmiTagSize)); | 316 __ mov(source_, Operand(source_, ASR, kSmiTagSize)); |
317 // Move sign bit from source to destination. This works because the sign bit | 317 // Move sign bit from source to destination. This works because the sign bit |
318 // in the exponent word of the double has the same position and polarity as | 318 // in the exponent word of the double has the same position and polarity as |
319 // the 2's complement sign bit in a Smi. | 319 // the 2's complement sign bit in a Smi. |
320 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); | 320 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); |
321 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC); | 321 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC); |
322 // Subtract from 0 if source was negative. | 322 // Subtract from 0 if source was negative. |
(...skipping 187 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
510 __ vmov(r2, r3, d7); | 510 __ vmov(r2, r3, d7); |
511 __ vmov(r0, r1, d6); | 511 __ vmov(r0, r1, d6); |
512 } | 512 } |
513 } else { | 513 } else { |
514 ASSERT(destination == kCoreRegisters); | 514 ASSERT(destination == kCoreRegisters); |
515 // Write Smi from r0 to r3 and r2 in double format. | 515 // Write Smi from r0 to r3 and r2 in double format. |
516 __ mov(scratch1, Operand(r0)); | 516 __ mov(scratch1, Operand(r0)); |
517 ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2); | 517 ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2); |
518 __ push(lr); | 518 __ push(lr); |
519 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 519 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); |
520 // Write Smi from r1 to r1 and r0 in double format. r9 is scratch. | 520 // Write Smi from r1 to r1 and r0 in double format. |
521 __ mov(scratch1, Operand(r1)); | 521 __ mov(scratch1, Operand(r1)); |
522 ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2); | 522 ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2); |
523 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 523 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); |
524 __ pop(lr); | 524 __ pop(lr); |
525 } | 525 } |
526 } | 526 } |
527 | 527 |
528 | 528 |
529 void FloatingPointHelper::LoadOperands( | 529 void FloatingPointHelper::LoadOperands( |
530 MacroAssembler* masm, | 530 MacroAssembler* masm, |
(...skipping 144 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
675 if (CpuFeatures::IsSupported(VFP3)) { | 675 if (CpuFeatures::IsSupported(VFP3)) { |
676 CpuFeatures::Scope scope(VFP3); | 676 CpuFeatures::Scope scope(VFP3); |
677 __ vmov(single_scratch, scratch1); | 677 __ vmov(single_scratch, scratch1); |
678 __ vcvt_f64_s32(double_dst, single_scratch); | 678 __ vcvt_f64_s32(double_dst, single_scratch); |
679 if (destination == kCoreRegisters) { | 679 if (destination == kCoreRegisters) { |
680 __ vmov(dst1, dst2, double_dst); | 680 __ vmov(dst1, dst2, double_dst); |
681 } | 681 } |
682 } else { | 682 } else { |
683 Label fewer_than_20_useful_bits; | 683 Label fewer_than_20_useful_bits; |
684 // Expected output: | 684 // Expected output: |
685 // | dst1 | dst2 | | 685 // | dst2 | dst1 | |
686 // | s | exp | mantissa | | 686 // | s | exp | mantissa | |
687 | 687 |
688 // Check for zero. | 688 // Check for zero. |
689 __ cmp(scratch1, Operand(0)); | 689 __ cmp(scratch1, Operand(0)); |
| 690 __ mov(dst2, scratch1); |
690 __ mov(dst1, scratch1); | 691 __ mov(dst1, scratch1); |
691 __ mov(dst2, scratch1); | |
692 __ b(eq, &done); | 692 __ b(eq, &done); |
693 | 693 |
694 // Preload the sign of the value. | 694 // Preload the sign of the value. |
695 __ and_(dst1, scratch1, Operand(HeapNumber::kSignMask), SetCC); | 695 __ and_(dst2, scratch1, Operand(HeapNumber::kSignMask), SetCC); |
696 // Get the absolute value of the object (as an unsigned integer). | 696 // Get the absolute value of the object (as an unsigned integer). |
697 __ rsb(scratch1, scratch1, Operand(0), SetCC, mi); | 697 __ rsb(scratch1, scratch1, Operand(0), SetCC, mi); |
698 | 698 |
699 // Get mantissa[51:20]. | 699 // Get mantissa[51:20]. |
700 | 700 |
701 // Get the position of the first set bit. | 701 // Get the position of the first set bit. |
702 __ CountLeadingZeros(dst2, scratch1, scratch2); | 702 __ CountLeadingZeros(dst1, scratch1, scratch2); |
703 __ rsb(dst2, dst2, Operand(31)); | 703 __ rsb(dst1, dst1, Operand(31)); |
704 | 704 |
705 // Set the exponent. | 705 // Set the exponent. |
706 __ add(scratch2, dst2, Operand(HeapNumber::kExponentBias)); | 706 __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias)); |
707 __ Bfi(dst1, scratch2, scratch2, | 707 __ Bfi(dst2, scratch2, scratch2, |
708 HeapNumber::kExponentShift, HeapNumber::kExponentBits); | 708 HeapNumber::kExponentShift, HeapNumber::kExponentBits); |
709 | 709 |
710 // Clear the first non-null bit. | 710 // Clear the first non-null bit. |
711 __ mov(scratch2, Operand(1)); | 711 __ mov(scratch2, Operand(1)); |
712 __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst2)); | 712 __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst1)); |
713 | 713 |
714 __ cmp(dst2, Operand(HeapNumber::kMantissaBitsInTopWord)); | 714 __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord)); |
715 // Get the number of bits to set in the lower part of the mantissa. | 715 // Get the number of bits to set in the lower part of the mantissa. |
716 __ sub(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); | 716 __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); |
717 __ b(mi, &fewer_than_20_useful_bits); | 717 __ b(mi, &fewer_than_20_useful_bits); |
718 // Set the higher 20 bits of the mantissa. | 718 // Set the higher 20 bits of the mantissa. |
719 __ orr(dst1, dst1, Operand(scratch1, LSR, scratch2)); | 719 __ orr(dst2, dst2, Operand(scratch1, LSR, scratch2)); |
720 __ rsb(scratch2, scratch2, Operand(32)); | 720 __ rsb(scratch2, scratch2, Operand(32)); |
721 __ mov(dst2, Operand(scratch1, LSL, scratch2)); | 721 __ mov(dst1, Operand(scratch1, LSL, scratch2)); |
722 __ b(&done); | 722 __ b(&done); |
723 | 723 |
724 __ bind(&fewer_than_20_useful_bits); | 724 __ bind(&fewer_than_20_useful_bits); |
725 __ rsb(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord)); | 725 __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord)); |
726 __ mov(scratch2, Operand(scratch1, LSL, scratch2)); | 726 __ mov(scratch2, Operand(scratch1, LSL, scratch2)); |
727 __ orr(dst1, dst1, scratch2); | 727 __ orr(dst2, dst2, scratch2); |
728 // Set dst2 to 0. | 728 // Set dst1 to 0. |
729 __ mov(dst2, Operand(0)); | 729 __ mov(dst1, Operand(0)); |
730 } | 730 } |
731 | 731 |
732 __ b(&done); | 732 __ b(&done); |
733 | 733 |
734 __ bind(&obj_is_not_smi); | 734 __ bind(&obj_is_not_smi); |
735 if (FLAG_debug_code) { | 735 if (FLAG_debug_code) { |
736 __ AbortIfNotRootValue(heap_number_map, | 736 __ AbortIfNotRootValue(heap_number_map, |
737 Heap::kHeapNumberMapRootIndex, | 737 Heap::kHeapNumberMapRootIndex, |
738 "HeapNumberMap register clobbered."); | 738 "HeapNumberMap register clobbered."); |
739 } | 739 } |
(...skipping 1315 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2055 __ sub(r0, result, Operand(kHeapObjectTag)); | 2055 __ sub(r0, result, Operand(kHeapObjectTag)); |
2056 __ vstr(d5, r0, HeapNumber::kValueOffset); | 2056 __ vstr(d5, r0, HeapNumber::kValueOffset); |
2057 __ add(r0, r0, Operand(kHeapObjectTag)); | 2057 __ add(r0, r0, Operand(kHeapObjectTag)); |
2058 __ Ret(); | 2058 __ Ret(); |
2059 } else { | 2059 } else { |
2060 // Call the C function to handle the double operation. | 2060 // Call the C function to handle the double operation. |
2061 FloatingPointHelper::CallCCodeForDoubleOperation(masm, | 2061 FloatingPointHelper::CallCCodeForDoubleOperation(masm, |
2062 op_, | 2062 op_, |
2063 result, | 2063 result, |
2064 scratch1); | 2064 scratch1); |
| 2065 if (FLAG_debug_code) { |
| 2066 __ stop("Unreachable code."); |
| 2067 } |
2065 } | 2068 } |
2066 break; | 2069 break; |
2067 } | 2070 } |
2068 case Token::BIT_OR: | 2071 case Token::BIT_OR: |
2069 case Token::BIT_XOR: | 2072 case Token::BIT_XOR: |
2070 case Token::BIT_AND: | 2073 case Token::BIT_AND: |
2071 case Token::SAR: | 2074 case Token::SAR: |
2072 case Token::SHR: | 2075 case Token::SHR: |
2073 case Token::SHL: { | 2076 case Token::SHL: { |
2074 if (smi_operands) { | 2077 if (smi_operands) { |
(...skipping 109 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2184 UNREACHABLE(); | 2187 UNREACHABLE(); |
2185 } | 2188 } |
2186 } | 2189 } |
2187 | 2190 |
2188 | 2191 |
2189 // Generate the smi code. If the operation on smis is successful this return is | 2192 // Generate the smi code. If the operation on smis is successful this return is |
2190 // generated. If the result is not a smi and heap number allocation is not | 2193 // generated. If the result is not a smi and heap number allocation is not |
2191 // requested the code falls through. If number allocation is requested but a | 2194 // requested the code falls through. If number allocation is requested but a |
2192 // heap number cannot be allocated the code jumps to the label gc_required. | 2195 // heap number cannot be allocated the code jumps to the label gc_required. |
2193 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, | 2196 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, |
| 2197 Label* use_runtime, |
2194 Label* gc_required, | 2198 Label* gc_required, |
2195 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { | 2199 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { |
2196 Label not_smis; | 2200 Label not_smis; |
2197 | 2201 |
2198 Register left = r1; | 2202 Register left = r1; |
2199 Register right = r0; | 2203 Register right = r0; |
2200 Register scratch1 = r7; | 2204 Register scratch1 = r7; |
2201 Register scratch2 = r9; | 2205 Register scratch2 = r9; |
2202 | 2206 |
2203 // Perform combined smi check on both operands. | 2207 // Perform combined smi check on both operands. |
2204 __ orr(scratch1, left, Operand(right)); | 2208 __ orr(scratch1, left, Operand(right)); |
2205 STATIC_ASSERT(kSmiTag == 0); | 2209 STATIC_ASSERT(kSmiTag == 0); |
2206 __ tst(scratch1, Operand(kSmiTagMask)); | 2210 __ tst(scratch1, Operand(kSmiTagMask)); |
2207 __ b(ne, ¬_smis); | 2211 __ b(ne, ¬_smis); |
2208 | 2212 |
2209 // If the smi-smi operation results in a smi, a return is generated. | 2213 // If the smi-smi operation results in a smi, a return is generated. |
2210 GenerateSmiSmiOperation(masm); | 2214 GenerateSmiSmiOperation(masm); |
2211 | 2215 |
2212 // If heap number results are possible generate the result in an allocated | 2216 // If heap number results are possible generate the result in an allocated |
2213 // heap number. | 2217 // heap number. |
2214 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { | 2218 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { |
2215 GenerateFPOperation(masm, true, NULL, gc_required); | 2219 GenerateFPOperation(masm, true, use_runtime, gc_required); |
2216 } | 2220 } |
2217 __ bind(¬_smis); | 2221 __ bind(¬_smis); |
2218 } | 2222 } |
2219 | 2223 |
2220 | 2224 |
2221 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | 2225 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
2222 Label not_smis, call_runtime; | 2226 Label not_smis, call_runtime; |
2223 | 2227 |
2224 if (result_type_ == TRBinaryOpIC::UNINITIALIZED || | 2228 if (result_type_ == TRBinaryOpIC::UNINITIALIZED || |
2225 result_type_ == TRBinaryOpIC::SMI) { | 2229 result_type_ == TRBinaryOpIC::SMI) { |
2226 // Only allow smi results. | 2230 // Only allow smi results. |
2227 GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS); | 2231 GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS); |
2228 } else { | 2232 } else { |
2229 // Allow heap number result and don't make a transition if a heap number | 2233 // Allow heap number result and don't make a transition if a heap number |
2230 // cannot be allocated. | 2234 // cannot be allocated. |
2231 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); | 2235 GenerateSmiCode(masm, |
| 2236 &call_runtime, |
| 2237 &call_runtime, |
| 2238 ALLOW_HEAPNUMBER_RESULTS); |
2232 } | 2239 } |
2233 | 2240 |
2234 // Code falls through if the result is not returned as either a smi or heap | 2241 // Code falls through if the result is not returned as either a smi or heap |
2235 // number. | 2242 // number. |
2236 GenerateTypeTransition(masm); | 2243 GenerateTypeTransition(masm); |
2237 | 2244 |
2238 __ bind(&call_runtime); | 2245 __ bind(&call_runtime); |
2239 GenerateCallRuntime(masm); | 2246 GenerateCallRuntime(masm); |
2240 } | 2247 } |
2241 | 2248 |
(...skipping 168 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2410 scratch1, | 2417 scratch1, |
2411 scratch2, | 2418 scratch2, |
2412 &call_runtime); | 2419 &call_runtime); |
2413 | 2420 |
2414 // Load the left value from the value saved on the stack. | 2421 // Load the left value from the value saved on the stack. |
2415 __ Pop(r1, r0); | 2422 __ Pop(r1, r0); |
2416 | 2423 |
2417 // Call the C function to handle the double operation. | 2424 // Call the C function to handle the double operation. |
2418 FloatingPointHelper::CallCCodeForDoubleOperation( | 2425 FloatingPointHelper::CallCCodeForDoubleOperation( |
2419 masm, op_, heap_number_result, scratch1); | 2426 masm, op_, heap_number_result, scratch1); |
| 2427 if (FLAG_debug_code) { |
| 2428 __ stop("Unreachable code."); |
| 2429 } |
2420 } | 2430 } |
2421 | 2431 |
2422 break; | 2432 break; |
2423 } | 2433 } |
2424 | 2434 |
2425 case Token::BIT_OR: | 2435 case Token::BIT_OR: |
2426 case Token::BIT_XOR: | 2436 case Token::BIT_XOR: |
2427 case Token::BIT_AND: | 2437 case Token::BIT_AND: |
2428 case Token::SAR: | 2438 case Token::SAR: |
2429 case Token::SHR: | 2439 case Token::SHR: |
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2496 | 2506 |
2497 // Check if the result fits in a smi. | 2507 // Check if the result fits in a smi. |
2498 __ add(scratch1, r2, Operand(0x40000000), SetCC); | 2508 __ add(scratch1, r2, Operand(0x40000000), SetCC); |
2499 // If not try to return a heap number. (We know the result is an int32.) | 2509 // If not try to return a heap number. (We know the result is an int32.) |
2500 __ b(mi, &return_heap_number); | 2510 __ b(mi, &return_heap_number); |
2501 // Tag the result and return. | 2511 // Tag the result and return. |
2502 __ SmiTag(r0, r2); | 2512 __ SmiTag(r0, r2); |
2503 __ Ret(); | 2513 __ Ret(); |
2504 | 2514 |
2505 __ bind(&return_heap_number); | 2515 __ bind(&return_heap_number); |
| 2516 heap_number_result = r5; |
| 2517 GenerateHeapResultAllocation(masm, |
| 2518 heap_number_result, |
| 2519 heap_number_map, |
| 2520 scratch1, |
| 2521 scratch2, |
| 2522 &call_runtime); |
| 2523 |
2506 if (CpuFeatures::IsSupported(VFP3)) { | 2524 if (CpuFeatures::IsSupported(VFP3)) { |
2507 CpuFeatures::Scope scope(VFP3); | 2525 CpuFeatures::Scope scope(VFP3); |
2508 heap_number_result = r5; | |
2509 GenerateHeapResultAllocation(masm, | |
2510 heap_number_result, | |
2511 heap_number_map, | |
2512 scratch1, | |
2513 scratch2, | |
2514 &call_runtime); | |
2515 | |
2516 if (op_ != Token::SHR) { | 2526 if (op_ != Token::SHR) { |
2517 // Convert the result to a floating point value. | 2527 // Convert the result to a floating point value. |
2518 __ vmov(double_scratch.low(), r2); | 2528 __ vmov(double_scratch.low(), r2); |
2519 __ vcvt_f64_s32(double_scratch, double_scratch.low()); | 2529 __ vcvt_f64_s32(double_scratch, double_scratch.low()); |
2520 } else { | 2530 } else { |
2521 // The result must be interpreted as an unsigned 32-bit integer. | 2531 // The result must be interpreted as an unsigned 32-bit integer. |
2522 __ vmov(double_scratch.low(), r2); | 2532 __ vmov(double_scratch.low(), r2); |
2523 __ vcvt_f64_u32(double_scratch, double_scratch.low()); | 2533 __ vcvt_f64_u32(double_scratch, double_scratch.low()); |
2524 } | 2534 } |
2525 | 2535 |
2526 // Store the result. | 2536 // Store the result. |
2527 __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); | 2537 __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); |
2528 __ vstr(double_scratch, r0, HeapNumber::kValueOffset); | 2538 __ vstr(double_scratch, r0, HeapNumber::kValueOffset); |
2529 __ mov(r0, heap_number_result); | 2539 __ mov(r0, heap_number_result); |
2530 __ Ret(); | 2540 __ Ret(); |
2531 } else { | 2541 } else { |
2532 // Tail call that writes the int32 in r2 to the heap number in r0, using | 2542 // Tail call that writes the int32 in r2 to the heap number in r0, using |
2533 // r3 as scratch. r0 is preserved and returned. | 2543 // r3 as scratch. r0 is preserved and returned. |
| 2544 __ mov(r0, r5); |
2534 WriteInt32ToHeapNumberStub stub(r2, r0, r3); | 2545 WriteInt32ToHeapNumberStub stub(r2, r0, r3); |
2535 __ TailCallStub(&stub); | 2546 __ TailCallStub(&stub); |
2536 } | 2547 } |
2537 | 2548 |
2538 break; | 2549 break; |
2539 } | 2550 } |
2540 | 2551 |
2541 default: | 2552 default: |
2542 UNREACHABLE(); | 2553 UNREACHABLE(); |
2543 } | 2554 } |
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2594 GenerateFPOperation(masm, false, &call_runtime, &call_runtime); | 2605 GenerateFPOperation(masm, false, &call_runtime, &call_runtime); |
2595 | 2606 |
2596 __ bind(&call_runtime); | 2607 __ bind(&call_runtime); |
2597 GenerateCallRuntime(masm); | 2608 GenerateCallRuntime(masm); |
2598 } | 2609 } |
2599 | 2610 |
2600 | 2611 |
2601 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { | 2612 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
2602 Label call_runtime, call_string_add_or_runtime; | 2613 Label call_runtime, call_string_add_or_runtime; |
2603 | 2614 |
2604 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); | 2615 GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); |
2605 | 2616 |
2606 GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime); | 2617 GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime); |
2607 | 2618 |
2608 __ bind(&call_string_add_or_runtime); | 2619 __ bind(&call_string_add_or_runtime); |
2609 if (op_ == Token::ADD) { | 2620 if (op_ == Token::ADD) { |
2610 GenerateAddStrings(masm); | 2621 GenerateAddStrings(masm); |
2611 } | 2622 } |
2612 | 2623 |
2613 __ bind(&call_runtime); | 2624 __ bind(&call_runtime); |
2614 GenerateCallRuntime(masm); | 2625 GenerateCallRuntime(masm); |
(...skipping 3229 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5844 __ str(pc, MemOperand(sp, 0)); | 5855 __ str(pc, MemOperand(sp, 0)); |
5845 __ Jump(target); // Call the C++ function. | 5856 __ Jump(target); // Call the C++ function. |
5846 } | 5857 } |
5847 | 5858 |
5848 | 5859 |
5849 #undef __ | 5860 #undef __ |
5850 | 5861 |
5851 } } // namespace v8::internal | 5862 } } // namespace v8::internal |
5852 | 5863 |
5853 #endif // V8_TARGET_ARCH_ARM | 5864 #endif // V8_TARGET_ARCH_ARM |
OLD | NEW |