OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 2518 matching lines...) |
2529 __ and_(right, left, Operand(scratch1)); | 2529 __ and_(right, left, Operand(scratch1)); |
2530 __ Ret(); | 2530 __ Ret(); |
2531 break; | 2531 break; |
2532 default: | 2532 default: |
2533 UNREACHABLE(); | 2533 UNREACHABLE(); |
2534 } | 2534 } |
2535 __ bind(&not_smi_result); | 2535 __ bind(&not_smi_result); |
2536 } | 2536 } |
2537 | 2537 |
2538 | 2538 |
2539 void TypeRecordingBinaryOpStub::GenerateVFPOperation( | 2539 void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, |
2540 MacroAssembler* masm) { | 2540 bool smi_operands, |
2541 switch (op_) { | 2541 Label* not_numbers, |
2542 case Token::ADD: | 2542 Label* gc_required) { |
2543 __ vadd(d5, d6, d7); | 2543 Register left = r1; |
2544 break; | 2544 Register right = r0; |
2545 case Token::SUB: | 2545 Register scratch1 = r7; |
2546 __ vsub(d5, d6, d7); | 2546 Register scratch2 = r9; |
2547 break; | 2547 |
2548 case Token::MUL: | 2548 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 depending |
2549 __ vmul(d5, d6, d7); | 2549 // on whether VFP3 is available. |
2550 break; | 2550 FloatingPointHelper::Destination destination = |
2551 case Token::DIV: | 2551 CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ? |
2552 __ vdiv(d5, d6, d7); | 2552 FloatingPointHelper::kVFPRegisters : |
2553 break; | 2553 FloatingPointHelper::kCoreRegisters; |
2554 default: | 2554 |
2555 UNREACHABLE(); | 2555 Register heap_number_map = r6; |
| 2556 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 2557 |
| 2558 // Allocate new heap number for result. |
| 2559 Register result = r5; |
| 2560 __ AllocateHeapNumber( |
| 2561 result, scratch1, scratch2, heap_number_map, gc_required); |
| 2562 |
| 2563 // Load the operands. |
| 2564 if (smi_operands) { |
| 2565 if (FLAG_debug_code) { |
| 2566 __ AbortIfNotSmi(left); |
| 2567 __ AbortIfNotSmi(right); |
| 2568 } |
| 2569 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); |
| 2570 } else { |
| 2571 FloatingPointHelper::LoadOperands(masm, |
| 2572 destination, |
| 2573 heap_number_map, |
| 2574 scratch1, |
| 2575 scratch2, |
| 2576 not_numbers); |
| 2577 } |
| 2578 |
| 2579 // Calculate the result. |
| 2580 if (destination == FloatingPointHelper::kVFPRegisters) { |
| 2581 // Using VFP registers: |
| 2582 // d6: Left value |
| 2583 // d7: Right value |
| 2584 CpuFeatures::Scope scope(VFP3); |
| 2585 switch (op_) { |
| 2586 case Token::ADD: |
| 2587 __ vadd(d5, d6, d7); |
| 2588 break; |
| 2589 case Token::SUB: |
| 2590 __ vsub(d5, d6, d7); |
| 2591 break; |
| 2592 case Token::MUL: |
| 2593 __ vmul(d5, d6, d7); |
| 2594 break; |
| 2595 case Token::DIV: |
| 2596 __ vdiv(d5, d6, d7); |
| 2597 break; |
| 2598 default: |
| 2599 UNREACHABLE(); |
| 2600 } |
| 2601 |
| 2602 __ sub(r0, result, Operand(kHeapObjectTag)); |
| 2603 __ vstr(d5, r0, HeapNumber::kValueOffset); |
| 2604 __ add(r0, r0, Operand(kHeapObjectTag)); |
| 2605 __ Ret(); |
| 2606 } else { |
| 2607 // Using core registers: |
| 2608 // r0: Left value (least significant part of mantissa). |
| 2609 // r1: Left value (sign, exponent, top of mantissa). |
| 2610 // r2: Right value (least significant part of mantissa). |
| 2611 // r3: Right value (sign, exponent, top of mantissa). |
| 2612 |
| 2613 __ push(lr); // For later. |
| 2614 __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments. |
| 2615 // Call C routine that may not cause GC or other trouble. r5 is callee |
| 2616 // save. |
| 2617 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); |
| 2618 // Store answer in the overwritable heap number. |
| 2619 #if !defined(USE_ARM_EABI) |
| 2620 // Double returned in fp coprocessor register 0 and 1, encoded as |
| 2621 // register cr8. Offsets must be divisible by 4 for coprocessor so we |
| 2622 // need to subtract the tag from r5. |
| 2623 __ sub(scratch1, result, Operand(kHeapObjectTag)); |
| 2624 __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset)); |
| 2625 #else |
| 2626 // Double returned in registers 0 and 1. |
| 2627 __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset)); |
| 2628 #endif |
| 2629 __ mov(r0, Operand(result)); |
| 2630 // And we are done. |
| 2631 __ pop(pc); |
2556 } | 2632 } |
2557 } | 2633 } |
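
The new GenerateFPOperation above folds the old VFP-only switch into one routine: it allocates the result heap number, loads the operands either from smis or from heap numbers, and then computes with VFP3 instructions when they are available (and the operation is not MOD) or through a C call otherwise. Below is a minimal C++ sketch of that dispatch shape only, not V8 code; has_vfp3, VfpPath and CorePath are hypothetical stand-ins for CpuFeatures::IsSupported(VFP3), the vadd/vsub/vmul/vdiv instructions, and the call through ExternalReference::double_fp_operation(op_).

// Illustrative sketch, not V8 code: pick a destination up front, then
// branch on it, mirroring the destination selection in GenerateFPOperation.
#include <cmath>
#include <cstdio>

enum class Op { kAdd, kSub, kMul, kDiv, kMod };

static bool has_vfp3() { return true; }  // assumed stand-in for the CPU feature probe

// "VFP register" path: one hardware instruction per operation.
static double VfpPath(Op op, double l, double r) {
  switch (op) {
    case Op::kAdd: return l + r;
    case Op::kSub: return l - r;
    case Op::kMul: return l * r;
    case Op::kDiv: return l / r;
    default:       return 0.0;  // MOD never reaches this path
  }
}

// "Core register" path: hand the doubles to a C routine.
static double CorePath(Op op, double l, double r) {
  if (op == Op::kMod) return std::fmod(l, r);
  return VfpPath(op, l, r);  // stand-in for the runtime's double_fp_operation
}

static double BinaryOp(Op op, double l, double r) {
  // MOD has no VFP instruction, so it always takes the core-register path.
  bool use_vfp = has_vfp3() && op != Op::kMod;
  return use_vfp ? VfpPath(op, l, r) : CorePath(op, l, r);
}

int main() {
  std::printf("%g %g\n", BinaryOp(Op::kDiv, 7.5, 2.0), BinaryOp(Op::kMod, 7.5, 2.0));
  return 0;
}
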
2558 | 2634 |
2559 | 2635 |
2560 // Generate the smi code. If the operation on smis is successful a return is | 2636 // Generate the smi code. If the operation on smis is successful a return is |
2561 // generated. If the result is not a smi and heap number allocation is not | 2637 // generated. If the result is not a smi and heap number allocation is not |
2562 // requested the code falls through. If number allocation is requested but a | 2638 // requested the code falls through. If number allocation is requested but a |
2563 // heap number cannot be allocated the code jumps to the label gc_required. | 2639 // heap number cannot be allocated the code jumps to the label gc_required. |
2564 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, | 2640 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, |
2565 Label* gc_required, | 2641 Label* gc_required, |
(...skipping 16 matching lines...) |
2582 STATIC_ASSERT(kSmiTag == 0); | 2658 STATIC_ASSERT(kSmiTag == 0); |
2583 __ tst(scratch1, Operand(kSmiTagMask)); | 2659 __ tst(scratch1, Operand(kSmiTagMask)); |
2584 __ b(ne, &not_smis); | 2660 __ b(ne, &not_smis); |
2585 | 2661 |
2586 // If the smi-smi operation results in a smi, a return is generated. | 2662 // If the smi-smi operation results in a smi, a return is generated. |
2587 GenerateSmiSmiOperation(masm); | 2663 GenerateSmiSmiOperation(masm); |
2588 | 2664 |
2589 // If heap number results are possible generate the result in an allocated | 2665 // If heap number results are possible generate the result in an allocated |
2590 // heap number. | 2666 // heap number. |
2591 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { | 2667 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { |
2592 FloatingPointHelper::Destination destination = | 2668 GenerateFPOperation(masm, true, NULL, gc_required); |
2593 CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ? | |
2594 FloatingPointHelper::kVFPRegisters : | |
2595 FloatingPointHelper::kCoreRegisters; | |
2596 | |
2597 Register heap_number_map = r6; | |
2598 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
2599 | |
2600 // Allocate new heap number for result. | |
2601 Register result = r5; | |
2602 __ AllocateHeapNumber( | |
2603 result, scratch1, scratch2, heap_number_map, gc_required); | |
2604 | |
2605 // Load the smis. | |
2606 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); | |
2607 | |
2608 // Calculate the result. | |
2609 if (destination == FloatingPointHelper::kVFPRegisters) { | |
2610 // Using VFP registers: | |
2611 // d6: Left value | |
2612 // d7: Right value | |
2613 CpuFeatures::Scope scope(VFP3); | |
2614 GenerateVFPOperation(masm); | |
2615 | |
2616 __ sub(r0, result, Operand(kHeapObjectTag)); | |
2617 __ vstr(d5, r0, HeapNumber::kValueOffset); | |
2618 __ add(r0, r0, Operand(kHeapObjectTag)); | |
2619 __ Ret(); | |
2620 } else { | |
2621 // Using core registers: | |
2622 // r0: Left value (least significant part of mantissa). | |
2623 // r1: Left value (sign, exponent, top of mantissa). | |
2624 // r2: Right value (least significant part of mantissa). | |
2625 // r3: Right value (sign, exponent, top of mantissa). | |
2626 | |
2627 __ push(lr); // For later. | |
2628 __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments. | |
2629 // Call C routine that may not cause GC or other trouble. r5 is callee | |
2630 // save. | |
2631 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); | |
2632 // Store answer in the overwritable heap number. | |
2633 #if !defined(USE_ARM_EABI) | |
2634 // Double returned in fp coprocessor register 0 and 1, encoded as | |
2635 // register cr8. Offsets must be divisible by 4 for coprocessor so we | |
2636 // need to subtract the tag from r5. | |
2637 __ sub(scratch1, result, Operand(kHeapObjectTag)); | |
2638 __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset)); | |
2639 #else | |
2640 // Double returned in registers 0 and 1. | |
2641 __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset)); | |
2642 #endif | |
2643 __ mov(r0, Operand(result)); | |
2644 // And we are done. | |
2645 __ pop(pc); | |
2646 } | |
2647 } | 2669 } |
2648 __ bind(&not_smis); | 2670 __ bind(&not_smis); |
2649 } | 2671 } |
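
GenerateSmiCode above now keeps only the smi-specific work: OR the two operands, test the single tag bit, try the smi-smi operation, and, when heap-number results are allowed, fall into GenerateFPOperation with smi_operands set. The following is a minimal sketch of the 32-bit smi tagging this fast path depends on, assuming kSmiTag == 0 and a one-bit tag as the STATIC_ASSERT above indicates; BothSmis, TagSmi and UntagSmi are hypothetical helpers, not V8 API.

// Illustrative sketch, not V8 code: 32-bit smi tagging and the
// "OR the operands, test the tag bit" trick used before GenerateSmiSmiOperation.
#include <cstdint>
#include <cstdio>

constexpr uint32_t kSmiTagMask = 1;  // low bit: 0 = smi, 1 = heap object

static bool BothSmis(uint32_t a, uint32_t b) {
  // If either tag bit is set, the OR has it set too -- one test covers both.
  return ((a | b) & kSmiTagMask) == 0;
}

static uint32_t TagSmi(int32_t value) { return static_cast<uint32_t>(value) << 1; }
static int32_t UntagSmi(uint32_t smi) { return static_cast<int32_t>(smi) >> 1; }

int main() {
  uint32_t left = TagSmi(6), right = TagSmi(3);
  if (BothSmis(left, right)) {
    // As with the BIT_AND case in the smi-smi switch: the AND of two tagged
    // smis is itself a valid tagged smi, so no untagging is needed.
    std::printf("%d\n", UntagSmi(left & right));  // prints 2
  }
  return 0;
}
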
2650 | 2672 |
2651 | 2673 |
2652 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | 2674 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
2653 Label not_smis, call_runtime; | 2675 Label not_smis, call_runtime; |
2654 | 2676 |
2655 ASSERT(op_ == Token::ADD || | 2677 ASSERT(op_ == Token::ADD || |
2656 op_ == Token::SUB || | 2678 op_ == Token::SUB || |
(...skipping 43 matching lines...) |
2700 } | 2722 } |
2701 | 2723 |
2702 | 2724 |
2703 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { | 2725 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
2704 ASSERT(op_ == Token::ADD || | 2726 ASSERT(op_ == Token::ADD || |
2705 op_ == Token::SUB || | 2727 op_ == Token::SUB || |
2706 op_ == Token::MUL || | 2728 op_ == Token::MUL || |
2707 op_ == Token::DIV || | 2729 op_ == Token::DIV || |
2708 op_ == Token::MOD); | 2730 op_ == Token::MOD); |
2709 | 2731 |
2710 Register scratch1 = r7; | 2732 Label not_numbers, call_runtime; |
2711 Register scratch2 = r9; | |
2712 | |
2713 Label not_number, call_runtime; | |
2714 ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER); | 2733 ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER); |
2715 | 2734 |
2716 Register heap_number_map = r6; | 2735 GenerateFPOperation(masm, false, &not_numbers, &call_runtime); |
2717 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
2718 | 2736 |
2719 // Get a heap number object for the result - might be left or right if one | 2737 __ bind(&not_numbers); |
2720 // of these are overwritable. Uses a callee-save register to keep the value | |
2721 // across the C call which we might use below. | |
2722 Register result = r5; | |
2723 GenerateHeapResultAllocation( | |
2724 masm, result, heap_number_map, scratch1, scratch2, &call_runtime); | |
2725 | |
2726 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 depending on | |
2727 // whether VFP3 is available. | |
2728 FloatingPointHelper::Destination destination = | |
2729 CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ? | |
2730 FloatingPointHelper::kVFPRegisters : | |
2731 FloatingPointHelper::kCoreRegisters; | |
2732 FloatingPointHelper::LoadOperands(masm, | |
2733 destination, | |
2734 heap_number_map, | |
2735 scratch1, | |
2736 scratch2, | |
2737 &not_number); | |
2738 if (destination == FloatingPointHelper::kVFPRegisters) { | |
2739 // Use floating point instructions for the binary operation. | |
2740 CpuFeatures::Scope scope(VFP3); | |
2741 GenerateVFPOperation(masm); | |
2742 | |
2743 // Fill the result into the allocated heap number and return. | |
2744 __ sub(r0, result, Operand(kHeapObjectTag)); | |
2745 __ vstr(d5, r0, HeapNumber::kValueOffset); | |
2746 __ add(r0, r0, Operand(kHeapObjectTag)); | |
2747 __ Ret(); | |
2748 | |
2749 } else { | |
2750 // Call a C function for the binary operation. | |
2751 // r0/r1: Left operand | |
2752 // r2/r3: Right operand | |
2753 | |
2754 __ push(lr); // For returning later (no GC after this point). | |
2755 __ PrepareCallCFunction(4, scratch1); // Two doubles count as 4 arguments. | |
2756 // Call C routine that may not cause GC or other trouble. result (r5) is | |
2757 // callee saved. | |
2758 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); | |
2759 // Fill the result into the allocated heap number. | |
2760 #if !defined(USE_ARM_EABI) | |
2761 // Double returned in fp coprocessor register 0 and 1, encoded as | |
2762 // register cr8. Offsets must be divisible by 4 for coprocessor so we | |
2763 // need to subtract the tag from r5. | |
2764 __ sub(scratch1, result, Operand(kHeapObjectTag)); | |
2765 __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset)); | |
2766 #else | |
2767 // Double returned in registers 0 and 1. | |
2768 __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset)); | |
2769 #endif | |
2770 __ mov(r0, Operand(result)); | |
2771 __ pop(pc); // Return to the pushed lr. | |
2772 } | |
2773 | |
2774 __ bind(&not_number); | |
2775 GenerateTypeTransition(masm); | 2738 GenerateTypeTransition(masm); |
2776 | 2739 |
2777 __ bind(&call_runtime); | 2740 __ bind(&call_runtime); |
2778 GenerateCallRuntime(masm); | 2741 GenerateCallRuntime(masm); |
2779 } | 2742 } |
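
With the refactoring, GenerateHeapNumberStub reduces to a single call to GenerateFPOperation plus the two bailout labels: not_numbers routes to a type transition and call_runtime to the runtime. A rough C++ sketch of that three-way flow follows; it is illustrative only, the enum and function names are invented, and in the real stub these are emitted branches to bound labels, not C++ returns.

// Illustrative sketch, not V8 code: the control flow the two labels
// passed to GenerateFPOperation stand for.
#include <cstdio>

enum class FastPathResult { kDone, kNotNumbers, kGcRequired };

static FastPathResult TryFloatingPointFastPath() {
  // Pretend the operands were not both numbers this time.
  return FastPathResult::kNotNumbers;
}

static void HandleBinaryOp() {
  switch (TryFloatingPointFastPath()) {
    case FastPathResult::kDone:
      return;  // result heap number already produced
    case FastPathResult::kNotNumbers:
      std::puts("type transition: patch the IC to a more generic stub");
      return;
    case FastPathResult::kGcRequired:
      std::puts("call runtime: let the runtime allocate and compute");
      return;
  }
}

int main() { HandleBinaryOp(); return 0; }
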
2780 | 2743 |
2781 | 2744 |
2782 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { | 2745 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
2783 ASSERT(op_ == Token::ADD || | 2746 ASSERT(op_ == Token::ADD || |
2784 op_ == Token::SUB || | 2747 op_ == Token::SUB || |
(...skipping 2984 matching lines...) |
5769 __ pop(r1); | 5732 __ pop(r1); |
5770 __ Jump(r2); | 5733 __ Jump(r2); |
5771 } | 5734 } |
5772 | 5735 |
5773 | 5736 |
5774 #undef __ | 5737 #undef __ |
5775 | 5738 |
5776 } } // namespace v8::internal | 5739 } } // namespace v8::internal |
5777 | 5740 |
5778 #endif // V8_TARGET_ARCH_ARM | 5741 #endif // V8_TARGET_ARCH_ARM |