Chromium Code Reviews

Unified Diff: src/arm/codegen-arm.cc

Issue 1113007: Revert change 4201. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 10 years, 9 months ago
 // Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 5505 matching lines...)
   __ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
 }


 // We fall into this code if the operands were Smis, but the result was
 // not (e.g. overflow).  We branch into this code (to the not_smi label) if
 // the operands were not both Smi.  The operands are in r0 and r1.  In order
 // to call the C-implemented binary fp operation routines we need to end up
 // with the double precision floating point operands in r0 and r1 (for the
 // value in r1) and r2 and r3 (for the value in r0).
-void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm,
-                                                  Label* not_smi,
-                                                  const Builtins::JavaScript& builtin) {
+static void HandleBinaryOpSlowCases(MacroAssembler* masm,
+                                    Label* not_smi,
+                                    const Builtins::JavaScript& builtin,
+                                    Token::Value operation,
+                                    OverwriteMode mode) {
   Label slow, slow_pop_2_first, do_the_call;
   Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
+  // Smi-smi case (overflow).
+  // Since both are Smis there is no heap number to overwrite, so allocate.
+  // The new heap number is in r5.  r6 and r7 are scratch.
+  AllocateHeapNumber(masm, &slow, r5, r6, r7);
+
   // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
   // using registers d7 and d6 for the double values.
   bool use_fp_registers = CpuFeatures::IsSupported(VFP3) &&
-      Token::MOD != op_;
-
-  if (ShouldGenerateSmiCode()) {
-    // Smi-smi case (overflow).
-    // Since both are Smis there is no heap number to overwrite, so allocate.
-    // The new heap number is in r5.  r6 and r7 are scratch.
-    AllocateHeapNumber(masm, &slow, r5, r6, r7);
-
-    if (use_fp_registers) {
-      CpuFeatures::Scope scope(VFP3);
-      __ mov(r7, Operand(r0, ASR, kSmiTagSize));
-      __ vmov(s15, r7);
-      __ vcvt(d7, s15);
-      __ mov(r7, Operand(r1, ASR, kSmiTagSize));
-      __ vmov(s13, r7);
-      __ vcvt(d6, s13);
-    } else {
-      // Write Smi from r0 to r3 and r2 in double format.  r6 is scratch.
-      __ mov(r7, Operand(r0));
-      ConvertToDoubleStub stub1(r3, r2, r7, r6);
-      __ push(lr);
-      __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
-      // Write Smi from r1 to r1 and r0 in double format.  r6 is scratch.
-      __ mov(r7, Operand(r1));
-      ConvertToDoubleStub stub2(r1, r0, r7, r6);
-      __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
-      __ pop(lr);
-    }
-
-    __ jmp(&do_the_call);  // Tail call.  No return.
-  }
+      Token::MOD != operation;
+  if (use_fp_registers) {
+    CpuFeatures::Scope scope(VFP3);
+    __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+    __ vmov(s15, r7);
+    __ vcvt(d7, s15);
+    __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+    __ vmov(s13, r7);
+    __ vcvt(d6, s13);
+  } else {
+    // Write Smi from r0 to r3 and r2 in double format.  r6 is scratch.
+    __ mov(r7, Operand(r0));
+    ConvertToDoubleStub stub1(r3, r2, r7, r6);
+    __ push(lr);
+    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+    // Write Smi from r1 to r1 and r0 in double format.  r6 is scratch.
+    __ mov(r7, Operand(r1));
+    ConvertToDoubleStub stub2(r1, r0, r7, r6);
+    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+    __ pop(lr);
+  }

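The fast path above untags each Smi with an arithmetic shift right by kSmiTagSize and widens the result to double precision (vmov/vcvt). A minimal C++ sketch of that transformation, assuming the classic 32-bit V8 Smi encoding (payload in the upper 31 bits, tag bit clear); the names here are illustrative, not V8's:

```cpp
#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;     // Low bit is the tag on 32-bit V8.
const int32_t kSmiTagMask = 1;
const int32_t kSmiTag = 0;     // A Smi has a clear tag bit.

bool IsSmi(int32_t value) { return (value & kSmiTagMask) == kSmiTag; }

double SmiToDouble(int32_t tagged) {
  assert(IsSmi(tagged));
  // Arithmetic shift right drops the tag bit and keeps the sign,
  // like "ASR kSmiTagSize"; vcvt then widens int32 to double.
  int32_t untagged = tagged >> kSmiTagSize;
  return static_cast<double>(untagged);
}
```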
-  // We branch here if at least one of r0 and r1 is not a Smi.
-  __ bind(not_smi);
-
-  if (ShouldGenerateFPCode()) {
-    if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
-      switch (op_) {
-        case Token::ADD:
-        case Token::SUB:
-        case Token::MUL:
-        case Token::DIV:
-          GenerateTypeTransition(masm);
-          break;
-
-        default:
-          break;
-      }
-    }
-
-    if (mode_ == NO_OVERWRITE) {
-      // In the case where there is no chance of an overwritable float we may as
-      // well do the allocation immediately while r0 and r1 are untouched.
-      AllocateHeapNumber(masm, &slow, r5, r6, r7);
-    }
-
-    // Move r0 to a double in r2-r3.
-    __ tst(r0, Operand(kSmiTagMask));
-    __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
-    __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
-    __ b(ne, &slow);
-    if (mode_ == OVERWRITE_RIGHT) {
-      __ mov(r5, Operand(r0));  // Overwrite this heap number.
-    }
-    if (use_fp_registers) {
-      CpuFeatures::Scope scope(VFP3);
-      // Load the double from tagged HeapNumber r0 to d7.
-      __ sub(r7, r0, Operand(kHeapObjectTag));
-      __ vldr(d7, r7, HeapNumber::kValueOffset);
-    } else {
-      // Calling convention says that second double is in r2 and r3.
-      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
-      __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
-    }
-    __ jmp(&finished_loading_r0);
-    __ bind(&r0_is_smi);
-    if (mode_ == OVERWRITE_RIGHT) {
-      // We can't overwrite a Smi so get address of new heap number into r5.
-      AllocateHeapNumber(masm, &slow, r5, r6, r7);
-    }
-
-    if (use_fp_registers) {
-      CpuFeatures::Scope scope(VFP3);
-      // Convert smi in r0 to double in d7.
-      __ mov(r7, Operand(r0, ASR, kSmiTagSize));
-      __ vmov(s15, r7);
-      __ vcvt(d7, s15);
-    } else {
-      // Write Smi from r0 to r3 and r2 in double format.
-      __ mov(r7, Operand(r0));
-      ConvertToDoubleStub stub3(r3, r2, r7, r6);
-      __ push(lr);
-      __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
-      __ pop(lr);
-    }
-
-    __ bind(&finished_loading_r0);
-
-    // Move r1 to a double in r0-r1.
-    __ tst(r1, Operand(kSmiTagMask));
-    __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
-    __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
-    __ b(ne, &slow);
-    if (mode_ == OVERWRITE_LEFT) {
-      __ mov(r5, Operand(r1));  // Overwrite this heap number.
-    }
-    if (use_fp_registers) {
-      CpuFeatures::Scope scope(VFP3);
-      // Load the double from tagged HeapNumber r1 to d6.
-      __ sub(r7, r1, Operand(kHeapObjectTag));
-      __ vldr(d6, r7, HeapNumber::kValueOffset);
-    } else {
-      // Calling convention says that first double is in r0 and r1.
-      __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
-      __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
-    }
-    __ jmp(&finished_loading_r1);
-    __ bind(&r1_is_smi);
-    if (mode_ == OVERWRITE_LEFT) {
-      // We can't overwrite a Smi so get address of new heap number into r5.
-      AllocateHeapNumber(masm, &slow, r5, r6, r7);
-    }
-
-    if (use_fp_registers) {
-      CpuFeatures::Scope scope(VFP3);
-      // Convert smi in r1 to double in d6.
-      __ mov(r7, Operand(r1, ASR, kSmiTagSize));
-      __ vmov(s13, r7);
-      __ vcvt(d6, s13);
-    } else {
-      // Write Smi from r1 to r1 and r0 in double format.
-      __ mov(r7, Operand(r1));
-      ConvertToDoubleStub stub4(r1, r0, r7, r6);
-      __ push(lr);
-      __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
-      __ pop(lr);
-    }
-
-    __ bind(&finished_loading_r1);
-
-    __ bind(&do_the_call);
-    // If we are inlining the operation using VFP3 instructions for
-    // add, subtract, multiply, or divide, the arguments are in d6 and d7.
-    if (use_fp_registers) {
-      CpuFeatures::Scope scope(VFP3);
-      // ARMv7 VFP3 instructions to implement
-      // double precision, add, subtract, multiply, divide.
-
-      if (Token::MUL == op_) {
-        __ vmul(d5, d6, d7);
-      } else if (Token::DIV == op_) {
-        __ vdiv(d5, d6, d7);
-      } else if (Token::ADD == op_) {
-        __ vadd(d5, d6, d7);
-      } else if (Token::SUB == op_) {
-        __ vsub(d5, d6, d7);
-      } else {
-        UNREACHABLE();
-      }
-      __ sub(r0, r5, Operand(kHeapObjectTag));
-      __ vstr(d5, r0, HeapNumber::kValueOffset);
-      __ add(r0, r0, Operand(kHeapObjectTag));
-      __ mov(pc, lr);
-    } else {
-      // If we did not inline the operation, then the arguments are in:
-      // r0: Left value (least significant part of mantissa).
-      // r1: Left value (sign, exponent, top of mantissa).
-      // r2: Right value (least significant part of mantissa).
-      // r3: Right value (sign, exponent, top of mantissa).
-      // r5: Address of heap number for result.
-
-      __ push(lr);  // For later.
-      __ push(r5);  // Address of heap number that is answer.
-      __ AlignStack(0);
-      // Call C routine that may not cause GC or other trouble.
-      __ mov(r5, Operand(ExternalReference::double_fp_operation(op_)));
-      __ Call(r5);
-      __ pop(r4);  // Address of heap number.
-      __ cmp(r4, Operand(Smi::FromInt(0)));
-      __ pop(r4, eq);  // Conditional pop instruction
-                       // to get rid of alignment push.
-      // Store answer in the overwritable heap number.
-#if !defined(USE_ARM_EABI)
-      // Double returned in fp coprocessor register 0 and 1, encoded as register
-      // cr8.  Offsets must be divisible by 4 for coprocessor so we need to
-      // subtract the tag from r4.
-      __ sub(r5, r4, Operand(kHeapObjectTag));
-      __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
-#else
-      // Double returned in registers 0 and 1.
-      __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
-      __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4));
-#endif
-      __ mov(r0, Operand(r4));
-      // And we are done.
-      __ pop(pc);
-    }
-  }
+  __ jmp(&do_the_call);  // Tail call.  No return.
+
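On the non-VFP path, both in the removed code above and in the replacement code below, each double travels in a pair of 32-bit core registers holding the raw IEEE-754 words, low word first (little-endian word order, as the kValueOffset / kValueOffset + 4 loads assume). A hedged sketch of splitting and rejoining a double that way; the helper names are illustrative:

```cpp
#include <cstdint>
#include <cstring>

// Split a double into the two 32-bit words that the register pairs
// (r0/r1 and r2/r3 in the stub) carry.
void SplitDouble(double value, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));     // Type-pun safely.
  *lo = static_cast<uint32_t>(bits);            // Least significant mantissa part.
  *hi = static_cast<uint32_t>(bits >> 32);      // Sign, exponent, top of mantissa.
}

double JoinDouble(uint32_t lo, uint32_t hi) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double value;
  std::memcpy(&value, &bits, sizeof(value));
  return value;
}
```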
   // We jump to here if something goes wrong (one param is not a number of any
   // sort or new-space allocation fails).
   __ bind(&slow);

   // Push arguments to the stack
   __ push(r1);
   __ push(r0);

-  if (Token::ADD == op_) {
+  if (Token::ADD == operation) {
     // Test for string arguments before calling runtime.
     // r1 : first argument
     // r0 : second argument
     // sp[0] : second argument
     // sp[4] : first argument

     Label not_strings, not_string1, string1, string1_smi2;
     __ tst(r1, Operand(kSmiTagMask));
     __ b(eq, &not_string1);
     __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
(...skipping 31 matching lines...)
     __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
     __ b(ge, &not_strings);

     // Only second argument is a string.
     __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);

     __ bind(&not_strings);
   }

   __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.  No return.
+
+  // We branch here if at least one of r0 and r1 is not a Smi.
+  __ bind(not_smi);
+  if (mode == NO_OVERWRITE) {
+    // In the case where there is no chance of an overwritable float we may as
+    // well do the allocation immediately while r0 and r1 are untouched.
+    AllocateHeapNumber(masm, &slow, r5, r6, r7);
+  }
+
+  // Move r0 to a double in r2-r3.
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
+  __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+  __ b(ne, &slow);
+  if (mode == OVERWRITE_RIGHT) {
+    __ mov(r5, Operand(r0));  // Overwrite this heap number.
+  }
+  if (use_fp_registers) {
+    CpuFeatures::Scope scope(VFP3);
+    // Load the double from tagged HeapNumber r0 to d7.
+    __ sub(r7, r0, Operand(kHeapObjectTag));
+    __ vldr(d7, r7, HeapNumber::kValueOffset);
+  } else {
+    // Calling convention says that second double is in r2 and r3.
+    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
+    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
+  }
+  __ jmp(&finished_loading_r0);
+  __ bind(&r0_is_smi);
+  if (mode == OVERWRITE_RIGHT) {
+    // We can't overwrite a Smi so get address of new heap number into r5.
+    AllocateHeapNumber(masm, &slow, r5, r6, r7);
+  }
+
+  if (use_fp_registers) {
+    CpuFeatures::Scope scope(VFP3);
+    // Convert smi in r0 to double in d7.
+    __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+    __ vmov(s15, r7);
+    __ vcvt(d7, s15);
+  } else {
+    // Write Smi from r0 to r3 and r2 in double format.
+    __ mov(r7, Operand(r0));
+    ConvertToDoubleStub stub3(r3, r2, r7, r6);
+    __ push(lr);
+    __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
+    __ pop(lr);
+  }
+
+  __ bind(&finished_loading_r0);
+
+  // Move r1 to a double in r0-r1.
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
+  __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+  __ b(ne, &slow);
+  if (mode == OVERWRITE_LEFT) {
+    __ mov(r5, Operand(r1));  // Overwrite this heap number.
+  }
+  if (use_fp_registers) {
+    CpuFeatures::Scope scope(VFP3);
+    // Load the double from tagged HeapNumber r1 to d6.
+    __ sub(r7, r1, Operand(kHeapObjectTag));
+    __ vldr(d6, r7, HeapNumber::kValueOffset);
+  } else {
+    // Calling convention says that first double is in r0 and r1.
+    __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
+    __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
+  }
+  __ jmp(&finished_loading_r1);
+  __ bind(&r1_is_smi);
+  if (mode == OVERWRITE_LEFT) {
+    // We can't overwrite a Smi so get address of new heap number into r5.
+    AllocateHeapNumber(masm, &slow, r5, r6, r7);
+  }
+
+  if (use_fp_registers) {
+    CpuFeatures::Scope scope(VFP3);
+    // Convert smi in r1 to double in d6.
+    __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+    __ vmov(s13, r7);
+    __ vcvt(d6, s13);
+  } else {
+    // Write Smi from r1 to r1 and r0 in double format.
+    __ mov(r7, Operand(r1));
+    ConvertToDoubleStub stub4(r1, r0, r7, r6);
+    __ push(lr);
+    __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
+    __ pop(lr);
+  }
+
+  __ bind(&finished_loading_r1);
+
+  __ bind(&do_the_call);
+  // If we are inlining the operation using VFP3 instructions for
+  // add, subtract, multiply, or divide, the arguments are in d6 and d7.
+  if (use_fp_registers) {
+    CpuFeatures::Scope scope(VFP3);
+    // ARMv7 VFP3 instructions to implement
+    // double precision, add, subtract, multiply, divide.
+
+    if (Token::MUL == operation) {
+      __ vmul(d5, d6, d7);
+    } else if (Token::DIV == operation) {
+      __ vdiv(d5, d6, d7);
+    } else if (Token::ADD == operation) {
+      __ vadd(d5, d6, d7);
+    } else if (Token::SUB == operation) {
+      __ vsub(d5, d6, d7);
+    } else {
+      UNREACHABLE();
+    }
+    __ sub(r0, r5, Operand(kHeapObjectTag));
+    __ vstr(d5, r0, HeapNumber::kValueOffset);
+    __ add(r0, r0, Operand(kHeapObjectTag));
+    __ mov(pc, lr);
+    return;
+  }
+
+  // If we did not inline the operation, then the arguments are in:
+  // r0: Left value (least significant part of mantissa).
+  // r1: Left value (sign, exponent, top of mantissa).
+  // r2: Right value (least significant part of mantissa).
+  // r3: Right value (sign, exponent, top of mantissa).
+  // r5: Address of heap number for result.
+
+  __ push(lr);  // For later.
+  __ push(r5);  // Address of heap number that is answer.
+  __ AlignStack(0);
+  // Call C routine that may not cause GC or other trouble.
+  __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
+  __ Call(r5);
+  __ pop(r4);  // Address of heap number.
+  __ cmp(r4, Operand(Smi::FromInt(0)));
+  __ pop(r4, eq);  // Conditional pop instruction to get rid of alignment push.
+  // Store answer in the overwritable heap number.
+#if !defined(USE_ARM_EABI)
+  // Double returned in fp coprocessor register 0 and 1, encoded as register
+  // cr8.  Offsets must be divisible by 4 for coprocessor so we need to
+  // subtract the tag from r4.
+  __ sub(r5, r4, Operand(kHeapObjectTag));
+  __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
+#else
+  // Double returned in registers 0 and 1.
+  __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
+  __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4));
+#endif
+  __ mov(r0, Operand(r4));
+  // And we are done.
+  __ pop(pc);
 }


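Throughout this function, the OverwriteMode decides whether an operand's HeapNumber can be reused as the result cell; a fresh number is allocated only when nothing is overwritable or the overwritable operand turned out to be a Smi. A hypothetical model of that policy; the types and helpers are stand-ins, not V8's:

```cpp
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

struct Value { bool is_heap_number; };  // Stand-in for a tagged V8 value.

// Reuse the overwritable operand's HeapNumber when it really is a heap
// number; otherwise allocate, since a Smi cannot be overwritten in place.
Value* ChooseResultCell(Value* left, Value* right, OverwriteMode mode,
                        Value* (*allocate_heap_number)()) {
  if (mode == OVERWRITE_LEFT && left->is_heap_number) return left;
  if (mode == OVERWRITE_RIGHT && right->is_heap_number) return right;
  return allocate_heap_number();  // NO_OVERWRITE, or the operand was a Smi.
}
```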
 // Tries to get a signed int32 out of a double precision floating point heap
 // number.  Rounds towards 0.  Fastest for doubles that are in the ranges
 // -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff.  This corresponds
 // almost to the range of signed int32 values that are not Smis.  Jumps to the
 // label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
 // (excluding the endpoints).
 static void GetInt32(MacroAssembler* masm,
(...skipping 313 matching lines...)
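GetInt32's contract, as stated in its comment above, can be modeled in scalar C++ roughly as follows. This is a sketch of the documented range check and truncation only, not of the register-level fast path:

```cpp
#include <cmath>
#include <cstdint>

// Round towards zero; succeed only strictly inside
// (-0x80000000.0, 0x80000000.0), otherwise take the slow path.
bool TryGetInt32(double value, int32_t* result) {
  if (!(value > -2147483648.0 && value < 2147483648.0)) {
    return false;  // Out of range (or NaN): jump to 'slow'.
  }
  *result = static_cast<int32_t>(std::trunc(value));
  return true;
}
```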



 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   // r1 : x
   // r0 : y
   // result : r0

   // All ops need to know whether we are dealing with two Smis.  Set up r2 to
   // tell us that.
-  if (ShouldGenerateSmiCode()) {
-    __ orr(r2, r1, Operand(r0));  // r2 = x | y;
-  }
+  __ orr(r2, r1, Operand(r0));  // r2 = x | y;

   switch (op_) {
     case Token::ADD: {
       Label not_smi;
       // Fast path.
-      if (ShouldGenerateSmiCode()) {
-        ASSERT(kSmiTag == 0);  // Adjust code below.
-        __ tst(r2, Operand(kSmiTagMask));
-        __ b(ne, &not_smi);
-        __ add(r0, r1, Operand(r0), SetCC);  // Add y optimistically.
-        // Return if no overflow.
-        __ Ret(vc);
-        __ sub(r0, r0, Operand(r1));  // Revert optimistic add.
-      }
-      HandleBinaryOpSlowCases(masm, &not_smi, Builtins::ADD);
+      ASSERT(kSmiTag == 0);  // Adjust code below.
+      __ tst(r2, Operand(kSmiTagMask));
+      __ b(ne, &not_smi);
+      __ add(r0, r1, Operand(r0), SetCC);  // Add y optimistically.
+      // Return if no overflow.
+      __ Ret(vc);
+      __ sub(r0, r0, Operand(r1));  // Revert optimistic add.
+
+      HandleBinaryOpSlowCases(masm,
+                              &not_smi,
+                              Builtins::ADD,
+                              Token::ADD,
+                              mode_);
       break;
     }
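The ADD fast path works because kSmiTag == 0: adding two tagged Smis adds their payloads in place, so only signed overflow needs handling, and on overflow the add is simply reverted before the slow call. A sketch using a GCC/Clang builtin (an assumption on the compiler; the stub itself uses the ARM overflow flag):

```cpp
#include <cstdint>

bool TrySmiAdd(int32_t x_tagged, int32_t y_tagged, int32_t* result) {
  int32_t sum;
  if (__builtin_add_overflow(x_tagged, y_tagged, &sum)) {
    return false;  // Overflow: operands left intact for the slow path.
  }
  *result = sum;   // Sum of two tagged Smis is a correctly tagged Smi.
  return true;
}
```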

     case Token::SUB: {
       Label not_smi;
       // Fast path.
-      if (ShouldGenerateSmiCode()) {
-        ASSERT(kSmiTag == 0);  // Adjust code below.
-        __ tst(r2, Operand(kSmiTagMask));
-        __ b(ne, &not_smi);
-        __ sub(r0, r1, Operand(r0), SetCC);  // Subtract y optimistically.
-        // Return if no overflow.
-        __ Ret(vc);
-        __ sub(r0, r1, Operand(r0));  // Revert optimistic subtract.
-      }
-      HandleBinaryOpSlowCases(masm, &not_smi, Builtins::SUB);
+      ASSERT(kSmiTag == 0);  // Adjust code below.
+      __ tst(r2, Operand(kSmiTagMask));
+      __ b(ne, &not_smi);
+      __ sub(r0, r1, Operand(r0), SetCC);  // Subtract y optimistically.
+      // Return if no overflow.
+      __ Ret(vc);
+      __ sub(r0, r1, Operand(r0));  // Revert optimistic subtract.
+
+      HandleBinaryOpSlowCases(masm,
+                              &not_smi,
+                              Builtins::SUB,
+                              Token::SUB,
+                              mode_);
       break;
     }

     case Token::MUL: {
       Label not_smi, slow;
-      if (ShouldGenerateSmiCode()) {
-        ASSERT(kSmiTag == 0);  // adjust code below
-        __ tst(r2, Operand(kSmiTagMask));
-        __ b(ne, &not_smi);
-        // Remove tag from one operand (but keep sign), so that result is Smi.
-        __ mov(ip, Operand(r0, ASR, kSmiTagSize));
-        // Do multiplication
-        __ smull(r3, r2, r1, ip);  // r3 = lower 32 bits of ip*r1.
-        // Go slow on overflows (overflow bit is not set).
-        __ mov(ip, Operand(r3, ASR, 31));
-        __ cmp(ip, Operand(r2));  // no overflow if higher 33 bits are identical
-        __ b(ne, &slow);
-        // Go slow on zero result to handle -0.
-        __ tst(r3, Operand(r3));
-        __ mov(r0, Operand(r3), LeaveCC, ne);
-        __ Ret(ne);
-        // We need -0 if we were multiplying a negative number with 0 to get 0.
-        // We know one of them was zero.
-        __ add(r2, r0, Operand(r1), SetCC);
-        __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl);
-        __ Ret(pl);  // Return Smi 0 if the non-zero one was positive.
-        // Slow case.  We fall through here if we multiplied a negative number
-        // with 0, because that would mean we should produce -0.
-        __ bind(&slow);
-      }
-      HandleBinaryOpSlowCases(masm, &not_smi, Builtins::MUL);
+      ASSERT(kSmiTag == 0);  // adjust code below
+      __ tst(r2, Operand(kSmiTagMask));
+      __ b(ne, &not_smi);
+      // Remove tag from one operand (but keep sign), so that result is Smi.
+      __ mov(ip, Operand(r0, ASR, kSmiTagSize));
+      // Do multiplication
+      __ smull(r3, r2, r1, ip);  // r3 = lower 32 bits of ip*r1.
+      // Go slow on overflows (overflow bit is not set).
+      __ mov(ip, Operand(r3, ASR, 31));
+      __ cmp(ip, Operand(r2));  // no overflow if higher 33 bits are identical
+      __ b(ne, &slow);
+      // Go slow on zero result to handle -0.
+      __ tst(r3, Operand(r3));
+      __ mov(r0, Operand(r3), LeaveCC, ne);
+      __ Ret(ne);
+      // We need -0 if we were multiplying a negative number with 0 to get 0.
+      // We know one of them was zero.
+      __ add(r2, r0, Operand(r1), SetCC);
+      __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl);
+      __ Ret(pl);  // Return Smi 0 if the non-zero one was positive.
+      // Slow case.  We fall through here if we multiplied a negative number
+      // with 0, because that would mean we should produce -0.
+      __ bind(&slow);
+
+      HandleBinaryOpSlowCases(masm,
+                              &not_smi,
+                              Builtins::MUL,
+                              Token::MUL,
+                              mode_);
       break;
     }
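The MUL fast path untags one operand, multiplies with smull to get a 64-bit product, and accepts the low word as the tagged result only when the high word equals the low word's sign extension; a zero product goes slow unless the signs show the result is +0. A rough 64-bit model of that logic (illustrative names, and the OR of the operands replaces the stub's add, which is equivalent here because one operand is zero):

```cpp
#include <cstdint>

bool TrySmiMul(int32_t x_tagged, int32_t y_tagged, int32_t* result) {
  const int kSmiTagSize = 1;
  // Untag one operand only: (x/2) * y == (x*y)/2, already tagged.
  int64_t product =
      static_cast<int64_t>(x_tagged >> kSmiTagSize) * y_tagged;
  int32_t lo = static_cast<int32_t>(product);
  int32_t hi = static_cast<int32_t>(product >> 32);
  // No overflow iff the high 33 bits are copies of the low word's sign bit,
  // mirroring "mov ip, r3 ASR 31; cmp ip, r2".
  if (hi != (lo >> 31)) return false;
  if (lo == 0 && (x_tagged | y_tagged) < 0) {
    return false;  // Result would be -0, which a Smi cannot represent.
  }
  *result = lo;
  return true;
}
```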

     case Token::DIV:
     case Token::MOD: {
       Label not_smi;
-      if (ShouldGenerateSmiCode()) {
+      if (specialized_on_rhs_) {
         Label smi_is_unsuitable;
         __ BranchOnNotSmi(r1, &not_smi);
         if (IsPowerOf2(constant_rhs_)) {
           if (op_ == Token::MOD) {
             __ and_(r0,
                     r1,
                     Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
                     SetCC);
             // We now have the answer, but if the input was negative we also
             // have the sign bit.  Our work is done if the result is
(...skipping 59 matching lines...)
           __ sub(r3, r1, Operand(r4, LSL, required_r4_shift), SetCC);
           __ b(ne, &smi_is_unsuitable);  // There was a remainder.
           __ mov(r0, Operand(r2, LSL, kSmiTagSize));
         } else {
           ASSERT(op_ == Token::MOD);
           __ sub(r0, r1, Operand(r4, LSL, required_r4_shift));
         }
       }
       __ Ret();
       __ bind(&smi_is_unsuitable);
+      } else {
+        __ jmp(&not_smi);
       }
-      HandleBinaryOpSlowCases(
-          masm,
-          &not_smi,
-          op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
+      HandleBinaryOpSlowCases(masm,
+                              &not_smi,
+                              op_ == Token::MOD ? Builtins::MOD : Builtins::DIV,
+                              op_,
+                              mode_);
       break;
     }
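When the stub is specialized on a power-of-two right-hand side, MOD reduces to a single mask: (constant_rhs_ << kSmiTagSize) - 1 extracts the tagged remainder directly, and keeping bit 31 in the mask makes a negative input visible so it can be routed to the slow path (where the sign, and a possible -0 result, get proper treatment). A hedged model of that trick:

```cpp
#include <cstdint>

bool TrySmiModPowerOf2(int32_t x_tagged, int32_t rhs, int32_t* result) {
  const int kSmiTagSize = 1;
  // Assumes rhs > 0 and rhs is a power of two, as IsPowerOf2 guarantees.
  uint32_t mask =
      0x80000000u | ((static_cast<uint32_t>(rhs) << kSmiTagSize) - 1);
  uint32_t masked = static_cast<uint32_t>(x_tagged) & mask;
  if (masked & 0x80000000u) {
    return false;  // Negative input: go slow.
  }
  *result = static_cast<int32_t>(masked);  // Tagged non-negative remainder.
  return true;
}
```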

     case Token::BIT_OR:
     case Token::BIT_AND:
     case Token::BIT_XOR:
     case Token::SAR:
     case Token::SHR:
     case Token::SHL: {
       Label slow;
(...skipping 39 matching lines...)
       __ Ret();
       __ bind(&slow);
       HandleNonSmiBitwiseOp(masm);
       break;
     }

     default: UNREACHABLE();
   }
   // This code should be unreachable.
   __ stop("Unreachable");
-
-  // Generate an unreachable reference to the DEFAULT stub so that it can be
-  // found at the end of this stub when clearing ICs at GC.
-  // TODO(kaznacheev): Check performance impact and get rid of this.
-  if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
-    GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
-    __ CallStub(&uninit);
-  }
 }
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  Label get_result;
-
-  __ push(r1);
-  __ push(r0);
-
-  // Internal frame is necessary to handle exceptions properly.
-  __ EnterInternalFrame();
-  // Call the stub proper to get the result in r0.
-  __ Call(&get_result);
-  __ LeaveInternalFrame();
-
-  __ push(r0);
-
-  __ mov(r0, Operand(Smi::FromInt(MinorKey())));
-  __ push(r0);
-  __ mov(r0, Operand(Smi::FromInt(op_)));
-  __ push(r0);
-  __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
-  __ push(r0);
-
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
-      6,
-      1);
-
-  // The entry point for the result calculation is assumed to be immediately
-  // after this sequence.
-  __ bind(&get_result);
-}


 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
-  GenericBinaryOpStub stub(key, type_info);
-  return stub.GetCode();
+  return Handle<Code>::null();
 }


 void StackCheckStub::Generate(MacroAssembler* masm) {
   // Do tail-call to runtime routine.  Runtime routines expect at least one
   // argument, so give it a Smi.
   __ mov(r0, Operand(Smi::FromInt(0)));
   __ push(r0);
   __ TailCallRuntime(Runtime::kStackGuard, 1, 1);

(...skipping 1694 matching lines...)

   // Just jump to runtime to add the two strings.
   __ bind(&string_add_runtime);
   __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
 }


 #undef __

 } }  // namespace v8::internal
