Chromium Code Reviews
Unified Diff: src/arm/codegen-arm.cc

Issue 883001: Porting binary op ICs to arm (Closed)
Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set created 10 years, 9 months ago
 // Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 5505 matching lines...)
   __ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
 }


 // We fall into this code if the operands were Smis, but the result was
 // not (e.g. overflow).  We branch into this code (to the not_smi label) if
 // the operands were not both Smi.  The operands are in r0 and r1.  In order
 // to call the C-implemented binary fp operation routines we need to end up
 // with the double precision floating point operands in r0 and r1 (for the
 // value in r1) and r2 and r3 (for the value in r0).
-static void HandleBinaryOpSlowCases(MacroAssembler* masm,
-                                    Label* not_smi,
-                                    const Builtins::JavaScript& builtin,
-                                    Token::Value operation,
-                                    OverwriteMode mode) {
+void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm,
+                                                  Label* not_smi,
+                                                  const Builtins::JavaScript& builtin) {
   Label slow, slow_pop_2_first, do_the_call;
   Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
-  // Smi-smi case (overflow).
-  // Since both are Smis there is no heap number to overwrite, so allocate.
-  // The new heap number is in r5.  r6 and r7 are scratch.
-  AllocateHeapNumber(masm, &slow, r5, r6, r7);
-
   // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
   // using registers d7 and d6 for the double values.
   bool use_fp_registers = CpuFeatures::IsSupported(VFP3) &&
-      Token::MOD != operation;
-  if (use_fp_registers) {
-    CpuFeatures::Scope scope(VFP3);
-    __ mov(r7, Operand(r0, ASR, kSmiTagSize));
-    __ vmov(s15, r7);
-    __ vcvt(d7, s15);
-    __ mov(r7, Operand(r1, ASR, kSmiTagSize));
-    __ vmov(s13, r7);
-    __ vcvt(d6, s13);
-  } else {
-    // Write Smi from r0 to r3 and r2 in double format.  r6 is scratch.
-    __ mov(r7, Operand(r0));
-    ConvertToDoubleStub stub1(r3, r2, r7, r6);
-    __ push(lr);
-    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
-    // Write Smi from r1 to r1 and r0 in double format.  r6 is scratch.
-    __ mov(r7, Operand(r1));
-    ConvertToDoubleStub stub2(r1, r0, r7, r6);
-    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
-    __ pop(lr);
-  }
-
-  __ jmp(&do_the_call);  // Tail call.  No return.
-
+      Token::MOD != op_;
+
+  if (ShouldGenerateSmiCode()) {
+    // Smi-smi case (overflow).
+    // Since both are Smis there is no heap number to overwrite, so allocate.
+    // The new heap number is in r5.  r6 and r7 are scratch.
+    AllocateHeapNumber(masm, &slow, r5, r6, r7);
+
+    if (use_fp_registers) {
+      CpuFeatures::Scope scope(VFP3);
+      __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+      __ vmov(s15, r7);
+      __ vcvt(d7, s15);
+      __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+      __ vmov(s13, r7);
+      __ vcvt(d6, s13);
+    } else {
+      // Write Smi from r0 to r3 and r2 in double format.  r6 is scratch.
+      __ mov(r7, Operand(r0));
+      ConvertToDoubleStub stub1(r3, r2, r7, r6);
+      __ push(lr);
+      __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+      // Write Smi from r1 to r1 and r0 in double format.  r6 is scratch.
+      __ mov(r7, Operand(r1));
+      ConvertToDoubleStub stub2(r1, r0, r7, r6);
+      __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+      __ pop(lr);
+    }
+
+    __ jmp(&do_the_call);  // Tail call.  No return.
+  }
+
+  // We branch here if at least one of r0 and r1 is not a Smi.
+  __ bind(not_smi);
+
+  if (ShouldGenerateFPCode()) {
+    if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
+      switch (op_) {
+        case Token::ADD:
+        case Token::SUB:
+        case Token::MUL:
+        case Token::DIV:
+          GenerateTypeTransition(masm);
+          break;
+
+        default:
+          break;
+      }
+    }
+
+    if (mode_ == NO_OVERWRITE) {
+      // In the case where there is no chance of an overwritable float we may as
+      // well do the allocation immediately while r0 and r1 are untouched.
+      AllocateHeapNumber(masm, &slow, r5, r6, r7);
+    }
+
+    // Move r0 to a double in r2-r3.
+    __ tst(r0, Operand(kSmiTagMask));
+    __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
+    __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+    __ b(ne, &slow);
+    if (mode_ == OVERWRITE_RIGHT) {
+      __ mov(r5, Operand(r0));  // Overwrite this heap number.
+    }
+    if (use_fp_registers) {
+      CpuFeatures::Scope scope(VFP3);
+      // Load the double from tagged HeapNumber r0 to d7.
+      __ sub(r7, r0, Operand(kHeapObjectTag));
+      __ vldr(d7, r7, HeapNumber::kValueOffset);
+    } else {
+      // Calling convention says that second double is in r2 and r3.
+      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
+      __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
+    }
+    __ jmp(&finished_loading_r0);
+    __ bind(&r0_is_smi);
+    if (mode_ == OVERWRITE_RIGHT) {
+      // We can't overwrite a Smi so get address of new heap number into r5.
+      AllocateHeapNumber(masm, &slow, r5, r6, r7);
+    }
+
+    if (use_fp_registers) {
+      CpuFeatures::Scope scope(VFP3);
+      // Convert smi in r0 to double in d7.
+      __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+      __ vmov(s15, r7);
+      __ vcvt(d7, s15);
+    } else {
+      // Write Smi from r0 to r3 and r2 in double format.
+      __ mov(r7, Operand(r0));
+      ConvertToDoubleStub stub3(r3, r2, r7, r6);
+      __ push(lr);
+      __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
+      __ pop(lr);
+    }
+
+    __ bind(&finished_loading_r0);
+
+    // Move r1 to a double in r0-r1.
+    __ tst(r1, Operand(kSmiTagMask));
+    __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
+    __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+    __ b(ne, &slow);
+    if (mode_ == OVERWRITE_LEFT) {
+      __ mov(r5, Operand(r1));  // Overwrite this heap number.
+    }
+    if (use_fp_registers) {
+      CpuFeatures::Scope scope(VFP3);
+      // Load the double from tagged HeapNumber r1 to d6.
+      __ sub(r7, r1, Operand(kHeapObjectTag));
+      __ vldr(d6, r7, HeapNumber::kValueOffset);
+    } else {
+      // Calling convention says that first double is in r0 and r1.
+      __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
+      __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
+    }
+    __ jmp(&finished_loading_r1);
+    __ bind(&r1_is_smi);
+    if (mode_ == OVERWRITE_LEFT) {
+      // We can't overwrite a Smi so get address of new heap number into r5.
+      AllocateHeapNumber(masm, &slow, r5, r6, r7);
+    }
+
+    if (use_fp_registers) {
+      CpuFeatures::Scope scope(VFP3);
+      // Convert smi in r1 to double in d6.
+      __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+      __ vmov(s13, r7);
+      __ vcvt(d6, s13);
+    } else {
+      // Write Smi from r1 to r1 and r0 in double format.
+      __ mov(r7, Operand(r1));
+      ConvertToDoubleStub stub4(r1, r0, r7, r6);
+      __ push(lr);
+      __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
+      __ pop(lr);
+    }
+
+    __ bind(&finished_loading_r1);
+
+    __ bind(&do_the_call);
+    // If we are inlining the operation using VFP3 instructions for
+    // add, subtract, multiply, or divide, the arguments are in d6 and d7.
+    if (use_fp_registers) {
+      CpuFeatures::Scope scope(VFP3);
+      // ARMv7 VFP3 instructions to implement
+      // double precision, add, subtract, multiply, divide.
+
+      if (Token::MUL == op_) {
+        __ vmul(d5, d6, d7);
+      } else if (Token::DIV == op_) {
+        __ vdiv(d5, d6, d7);
+      } else if (Token::ADD == op_) {
+        __ vadd(d5, d6, d7);
+      } else if (Token::SUB == op_) {
+        __ vsub(d5, d6, d7);
+      } else {
+        UNREACHABLE();
+      }
+      __ sub(r0, r5, Operand(kHeapObjectTag));
+      __ vstr(d5, r0, HeapNumber::kValueOffset);
+      __ add(r0, r0, Operand(kHeapObjectTag));
+      __ mov(pc, lr);
+    } else {
+      // If we did not inline the operation, then the arguments are in:
+      // r0: Left value (least significant part of mantissa).
+      // r1: Left value (sign, exponent, top of mantissa).
+      // r2: Right value (least significant part of mantissa).
+      // r3: Right value (sign, exponent, top of mantissa).
+      // r5: Address of heap number for result.
+
+      __ push(lr);  // For later.
+      __ push(r5);  // Address of heap number that is answer.
+      __ AlignStack(0);
+      // Call C routine that may not cause GC or other trouble.
+      __ mov(r5, Operand(ExternalReference::double_fp_operation(op_)));
+      __ Call(r5);
+      __ pop(r4);  // Address of heap number.
+      __ cmp(r4, Operand(Smi::FromInt(0)));
+      __ pop(r4, eq);  // Conditional pop instruction
+                       // to get rid of alignment push.
+      // Store answer in the overwritable heap number.
+#if !defined(USE_ARM_EABI)
+      // Double returned in fp coprocessor register 0 and 1, encoded as register
+      // cr8.  Offsets must be divisible by 4 for coprocessor so we need to
+      // subtract the tag from r4.
+      __ sub(r5, r4, Operand(kHeapObjectTag));
+      __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
+#else
+      // Double returned in registers 0 and 1.
+      __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
+      __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4));
+#endif
+      __ mov(r0, Operand(r4));
+      // And we are done.
+      __ pop(pc);
+    }
+  }
   // We jump to here if something goes wrong (one param is not a number of any
   // sort or new-space allocation fails).
   __ bind(&slow);

   // Push arguments to the stack
   __ push(r1);
   __ push(r0);

-  if (Token::ADD == operation) {
+  if (Token::ADD == op_) {
     // Test for string arguments before calling runtime.
     // r1 : first argument
     // r0 : second argument
     // sp[0] : second argument
     // sp[4] : first argument

     Label not_strings, not_string1, string1, string1_smi2;
     __ tst(r1, Operand(kSmiTagMask));
     __ b(eq, &not_string1);
     __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
(...skipping 31 matching lines...)
     __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
     __ b(ge, &not_strings);

     // Only second argument is a string.
     __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);

     __ bind(&not_strings);
   }

   __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.  No return.
-
-  // We branch here if at least one of r0 and r1 is not a Smi.
-  __ bind(not_smi);
-  if (mode == NO_OVERWRITE) {
-    // In the case where there is no chance of an overwritable float we may as
-    // well do the allocation immediately while r0 and r1 are untouched.
-    AllocateHeapNumber(masm, &slow, r5, r6, r7);
-  }
-
-  // Move r0 to a double in r2-r3.
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
-  __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
-  __ b(ne, &slow);
-  if (mode == OVERWRITE_RIGHT) {
-    __ mov(r5, Operand(r0));  // Overwrite this heap number.
-  }
-  if (use_fp_registers) {
-    CpuFeatures::Scope scope(VFP3);
-    // Load the double from tagged HeapNumber r0 to d7.
-    __ sub(r7, r0, Operand(kHeapObjectTag));
-    __ vldr(d7, r7, HeapNumber::kValueOffset);
-  } else {
-    // Calling convention says that second double is in r2 and r3.
-    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
-    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
-  }
-  __ jmp(&finished_loading_r0);
-  __ bind(&r0_is_smi);
-  if (mode == OVERWRITE_RIGHT) {
-    // We can't overwrite a Smi so get address of new heap number into r5.
-    AllocateHeapNumber(masm, &slow, r5, r6, r7);
-  }
-
-  if (use_fp_registers) {
-    CpuFeatures::Scope scope(VFP3);
-    // Convert smi in r0 to double in d7.
-    __ mov(r7, Operand(r0, ASR, kSmiTagSize));
-    __ vmov(s15, r7);
-    __ vcvt(d7, s15);
-  } else {
-    // Write Smi from r0 to r3 and r2 in double format.
-    __ mov(r7, Operand(r0));
-    ConvertToDoubleStub stub3(r3, r2, r7, r6);
-    __ push(lr);
-    __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
-    __ pop(lr);
-  }
-
-  __ bind(&finished_loading_r0);
-
-  // Move r1 to a double in r0-r1.
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
-  __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
-  __ b(ne, &slow);
-  if (mode == OVERWRITE_LEFT) {
-    __ mov(r5, Operand(r1));  // Overwrite this heap number.
-  }
-  if (use_fp_registers) {
-    CpuFeatures::Scope scope(VFP3);
-    // Load the double from tagged HeapNumber r1 to d6.
-    __ sub(r7, r1, Operand(kHeapObjectTag));
-    __ vldr(d6, r7, HeapNumber::kValueOffset);
-  } else {
-    // Calling convention says that first double is in r0 and r1.
-    __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
-    __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
-  }
-  __ jmp(&finished_loading_r1);
-  __ bind(&r1_is_smi);
-  if (mode == OVERWRITE_LEFT) {
-    // We can't overwrite a Smi so get address of new heap number into r5.
-    AllocateHeapNumber(masm, &slow, r5, r6, r7);
-  }
-
-  if (use_fp_registers) {
-    CpuFeatures::Scope scope(VFP3);
-    // Convert smi in r1 to double in d6.
-    __ mov(r7, Operand(r1, ASR, kSmiTagSize));
-    __ vmov(s13, r7);
-    __ vcvt(d6, s13);
-  } else {
-    // Write Smi from r1 to r1 and r0 in double format.
-    __ mov(r7, Operand(r1));
-    ConvertToDoubleStub stub4(r1, r0, r7, r6);
-    __ push(lr);
-    __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
-    __ pop(lr);
-  }
-
-  __ bind(&finished_loading_r1);
-
-  __ bind(&do_the_call);
-  // If we are inlining the operation using VFP3 instructions for
-  // add, subtract, multiply, or divide, the arguments are in d6 and d7.
-  if (use_fp_registers) {
-    CpuFeatures::Scope scope(VFP3);
-    // ARMv7 VFP3 instructions to implement
-    // double precision, add, subtract, multiply, divide.
-
-    if (Token::MUL == operation) {
-      __ vmul(d5, d6, d7);
-    } else if (Token::DIV == operation) {
-      __ vdiv(d5, d6, d7);
-    } else if (Token::ADD == operation) {
-      __ vadd(d5, d6, d7);
-    } else if (Token::SUB == operation) {
-      __ vsub(d5, d6, d7);
-    } else {
-      UNREACHABLE();
-    }
-    __ sub(r0, r5, Operand(kHeapObjectTag));
-    __ vstr(d5, r0, HeapNumber::kValueOffset);
-    __ add(r0, r0, Operand(kHeapObjectTag));
-    __ mov(pc, lr);
-    return;
-  }
-
-  // If we did not inline the operation, then the arguments are in:
-  // r0: Left value (least significant part of mantissa).
-  // r1: Left value (sign, exponent, top of mantissa).
-  // r2: Right value (least significant part of mantissa).
-  // r3: Right value (sign, exponent, top of mantissa).
-  // r5: Address of heap number for result.
-
-  __ push(lr);  // For later.
-  __ push(r5);  // Address of heap number that is answer.
-  __ AlignStack(0);
-  // Call C routine that may not cause GC or other trouble.
-  __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
-  __ Call(r5);
-  __ pop(r4);  // Address of heap number.
-  __ cmp(r4, Operand(Smi::FromInt(0)));
-  __ pop(r4, eq);  // Conditional pop instruction to get rid of alignment push.
-  // Store answer in the overwritable heap number.
-#if !defined(USE_ARM_EABI)
-  // Double returned in fp coprocessor register 0 and 1, encoded as register
-  // cr8.  Offsets must be divisible by 4 for coprocessor so we need to
-  // subtract the tag from r4.
-  __ sub(r5, r4, Operand(kHeapObjectTag));
-  __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
-#else
-  // Double returned in registers 0 and 1.
-  __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
-  __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4));
-#endif
-  __ mov(r0, Operand(r4));
-  // And we are done.
-  __ pop(pc);
 }
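A note on the convention this helper leans on: a Smi is the integer shifted left by kSmiTagSize with a zero tag bit, so untagging is a single arithmetic shift, and under the ARM soft-float convention each double travels as two 32-bit words in a register pair (r0/r1 for the value originally in r1, r2/r3 for the value in r0). A minimal host-side sketch of that untag-and-split step, with hypothetical helper names standing in for what the stub does in registers:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Hypothetical helpers mirroring what the stub does in registers.
    const int kSmiTagSize = 1;  // Low bit is the tag; 0 marks a Smi.

    bool IsSmi(int32_t tagged) { return (tagged & 1) == 0; }

    // Untag with an arithmetic shift, as in "Operand(r0, ASR, kSmiTagSize)".
    int32_t SmiUntag(int32_t tagged) { return tagged >> kSmiTagSize; }

    // Convert to double and split into the two 32-bit words that a
    // soft-float register pair carries.
    void ToDoubleHalves(int32_t tagged, uint32_t* lo, uint32_t* hi) {
      double d = static_cast<double>(SmiUntag(tagged));
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      *lo = static_cast<uint32_t>(bits);        // low word of the mantissa
      *hi = static_cast<uint32_t>(bits >> 32);  // sign, exponent, mantissa top
    }

    int main() {
      int32_t tagged = 7 << kSmiTagSize;  // the tagged Smi 7
      uint32_t lo = 0, hi = 0;
      if (IsSmi(tagged)) ToDoubleHalves(tagged, &lo, &hi);
      std::printf("%08x %08x\n", hi, lo);  // 401c0000 00000000
      return 0;
    }

Handing the halves over in r0/r1 and r2/r3 is what lets the slow path call the C-implemented fp routines directly, without requiring VFP hardware.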


 // Tries to get a signed int32 out of a double precision floating point heap
 // number.  Rounds towards 0.  Fastest for doubles that are in the ranges
 // -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff.  This corresponds
 // almost to the range of signed int32 values that are not Smis.  Jumps to the
 // label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
 // (excluding the endpoints).
 static void GetInt32(MacroAssembler* masm,
(...skipping 313 matching lines...)
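Behaviorally, the elided GetInt32 body truncates toward zero and bails to the slow label when the double lies outside the open interval (-2^31, 2^31); the "fastest" ranges named in the comment are those where a single mantissa shift suffices. A portable model of that contract (a behavioral sketch, not the generated instruction sequence):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Behavioral model of GetInt32: truncate toward zero, or report that
    // the slow path is needed. NaN compares false, so it also goes slow.
    bool GetInt32(double d, int32_t* out) {
      if (!(d > -2147483648.0 && d < 2147483648.0)) return false;
      *out = static_cast<int32_t>(std::trunc(d));
      return true;
    }

    int main() {
      int32_t v;
      if (GetInt32(-1.9, &v)) std::printf("%d\n", v);  // -1, rounds toward 0
      if (!GetInt32(3e9, &v)) std::printf("slow path\n");
      return 0;
    }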



 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   // r1 : x
   // r0 : y
   // result : r0

   // All ops need to know whether we are dealing with two Smis.  Set up r2 to
   // tell us that.
-  __ orr(r2, r1, Operand(r0));  // r2 = x | y;
+  if (ShouldGenerateSmiCode()) {
+    __ orr(r2, r1, Operand(r0));  // r2 = x | y;
+  }

   switch (op_) {
     case Token::ADD: {
       Label not_smi;
       // Fast path.
-      ASSERT(kSmiTag == 0);  // Adjust code below.
-      __ tst(r2, Operand(kSmiTagMask));
-      __ b(ne, &not_smi);
-      __ add(r0, r1, Operand(r0), SetCC);  // Add y optimistically.
-      // Return if no overflow.
-      __ Ret(vc);
-      __ sub(r0, r0, Operand(r1));  // Revert optimistic add.
-
-      HandleBinaryOpSlowCases(masm,
-                              &not_smi,
-                              Builtins::ADD,
-                              Token::ADD,
-                              mode_);
+      if (ShouldGenerateSmiCode()) {
+        ASSERT(kSmiTag == 0);  // Adjust code below.
+        __ tst(r2, Operand(kSmiTagMask));
+        __ b(ne, &not_smi);
+        __ add(r0, r1, Operand(r0), SetCC);  // Add y optimistically.
+        // Return if no overflow.
+        __ Ret(vc);
+        __ sub(r0, r0, Operand(r1));  // Revert optimistic add.
+      }
+      HandleBinaryOpSlowCases(masm, &not_smi, Builtins::ADD);
       break;
     }
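Because the Smi tag bit is zero, adding two tagged words is the same as tagging the sum of the untagged values, so the fast path adds first and only checks the overflow flag afterwards, reverting the add before falling into the slow cases. The same trick in portable form (assuming the GCC/Clang __builtin_add_overflow intrinsic):

    #include <cstdint>
    #include <cstdio>

    // Optimistic Smi addition: add the tagged words, undo on overflow.
    // Mirrors "add ..., SetCC; Ret(vc); sub ..." above.
    bool SmiAddFast(int32_t x_tagged, int32_t y_tagged, int32_t* result) {
      int32_t sum;
      if (__builtin_add_overflow(x_tagged, y_tagged, &sum)) {
        return false;  // Overflow: take the slow (heap number) path.
      }
      *result = sum;  // Tag bit is still zero, so this is a valid Smi.
      return true;
    }

    int main() {
      int32_t r;
      if (SmiAddFast(3 << 1, 4 << 1, &r)) std::printf("%d\n", r >> 1);  // 7
      return 0;
    }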

     case Token::SUB: {
       Label not_smi;
       // Fast path.
-      ASSERT(kSmiTag == 0);  // Adjust code below.
-      __ tst(r2, Operand(kSmiTagMask));
-      __ b(ne, &not_smi);
-      __ sub(r0, r1, Operand(r0), SetCC);  // Subtract y optimistically.
-      // Return if no overflow.
-      __ Ret(vc);
-      __ sub(r0, r1, Operand(r0));  // Revert optimistic subtract.
-
-      HandleBinaryOpSlowCases(masm,
-                              &not_smi,
-                              Builtins::SUB,
-                              Token::SUB,
-                              mode_);
+      if (ShouldGenerateSmiCode()) {
+        ASSERT(kSmiTag == 0);  // Adjust code below.
+        __ tst(r2, Operand(kSmiTagMask));
+        __ b(ne, &not_smi);
+        __ sub(r0, r1, Operand(r0), SetCC);  // Subtract y optimistically.
+        // Return if no overflow.
+        __ Ret(vc);
+        __ sub(r0, r1, Operand(r0));  // Revert optimistic subtract.
+      }
+      HandleBinaryOpSlowCases(masm, &not_smi, Builtins::SUB);
       break;
     }

     case Token::MUL: {
       Label not_smi, slow;
-      ASSERT(kSmiTag == 0);  // adjust code below
-      __ tst(r2, Operand(kSmiTagMask));
-      __ b(ne, &not_smi);
-      // Remove tag from one operand (but keep sign), so that result is Smi.
-      __ mov(ip, Operand(r0, ASR, kSmiTagSize));
-      // Do multiplication
-      __ smull(r3, r2, r1, ip);  // r3 = lower 32 bits of ip*r1.
-      // Go slow on overflows (overflow bit is not set).
-      __ mov(ip, Operand(r3, ASR, 31));
-      __ cmp(ip, Operand(r2));  // no overflow if higher 33 bits are identical
-      __ b(ne, &slow);
-      // Go slow on zero result to handle -0.
-      __ tst(r3, Operand(r3));
-      __ mov(r0, Operand(r3), LeaveCC, ne);
-      __ Ret(ne);
-      // We need -0 if we were multiplying a negative number with 0 to get 0.
-      // We know one of them was zero.
-      __ add(r2, r0, Operand(r1), SetCC);
-      __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl);
-      __ Ret(pl);  // Return Smi 0 if the non-zero one was positive.
-      // Slow case.  We fall through here if we multiplied a negative number
-      // with 0, because that would mean we should produce -0.
-      __ bind(&slow);
-
-      HandleBinaryOpSlowCases(masm,
-                              &not_smi,
-                              Builtins::MUL,
-                              Token::MUL,
-                              mode_);
+      if (ShouldGenerateSmiCode()) {
+        ASSERT(kSmiTag == 0);  // adjust code below
+        __ tst(r2, Operand(kSmiTagMask));
+        __ b(ne, &not_smi);
+        // Remove tag from one operand (but keep sign), so that result is Smi.
+        __ mov(ip, Operand(r0, ASR, kSmiTagSize));
+        // Do multiplication
+        __ smull(r3, r2, r1, ip);  // r3 = lower 32 bits of ip*r1.
+        // Go slow on overflows (overflow bit is not set).
+        __ mov(ip, Operand(r3, ASR, 31));
+        __ cmp(ip, Operand(r2));  // no overflow if higher 33 bits are identical
+        __ b(ne, &slow);
+        // Go slow on zero result to handle -0.
+        __ tst(r3, Operand(r3));
+        __ mov(r0, Operand(r3), LeaveCC, ne);
+        __ Ret(ne);
+        // We need -0 if we were multiplying a negative number with 0 to get 0.
+        // We know one of them was zero.
+        __ add(r2, r0, Operand(r1), SetCC);
+        __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl);
+        __ Ret(pl);  // Return Smi 0 if the non-zero one was positive.
+        // Slow case.  We fall through here if we multiplied a negative number
+        // with 0, because that would mean we should produce -0.
+        __ bind(&slow);
+      }
+      HandleBinaryOpSlowCases(masm, &not_smi, Builtins::MUL);
       break;
     }
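smull yields the full 64-bit product, and untagging just one operand first means the product is itself a correctly tagged Smi candidate. It fits when the high word equals the sign-extension of the low word (the "higher 33 bits are identical" check), and a zero product must go slow because the stub still has to choose between 0 and -0 from the operand signs. A 64-bit sketch of those checks (the inline code resolves the +0 case itself; this sketch simply defers any zero):

    #include <cstdint>
    #include <cstdio>

    // Mirrors the smull fast path: untagged(x) * tagged(y) is tagged(x * y).
    bool SmiMulFast(int32_t x_tagged, int32_t y_tagged, int32_t* result) {
      int64_t product = static_cast<int64_t>(x_tagged >> 1) * y_tagged;
      int32_t lo = static_cast<int32_t>(product);
      int32_t hi = static_cast<int32_t>(product >> 32);
      if (hi != (lo >> 31)) return false;  // Top 33 bits differ: overflow.
      if (lo == 0) return false;           // Might need -0: defer to slow code.
      *result = lo;
      return true;
    }

    int main() {
      int32_t r;
      if (SmiMulFast(6 << 1, 7 << 1, &r)) std::printf("%d\n", r >> 1);  // 42
      return 0;
    }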

     case Token::DIV:
     case Token::MOD: {
       Label not_smi;
-      if (specialized_on_rhs_) {
+      if (ShouldGenerateSmiCode()) {
         Label smi_is_unsuitable;
         __ BranchOnNotSmi(r1, &not_smi);
         if (IsPowerOf2(constant_rhs_)) {
           if (op_ == Token::MOD) {
             __ and_(r0,
                     r1,
                     Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
                     SetCC);
             // We now have the answer, but if the input was negative we also
             // have the sign bit.  Our work is done if the result is
(...skipping 59 matching lines...)
             __ sub(r3, r1, Operand(r4, LSL, required_r4_shift), SetCC);
             __ b(ne, &smi_is_unsuitable);  // There was a remainder.
             __ mov(r0, Operand(r2, LSL, kSmiTagSize));
           } else {
             ASSERT(op_ == Token::MOD);
             __ sub(r0, r1, Operand(r4, LSL, required_r4_shift));
           }
         }
         __ Ret();
         __ bind(&smi_is_unsuitable);
-      } else {
-        __ jmp(&not_smi);
       }
-      HandleBinaryOpSlowCases(masm,
-                              &not_smi,
-                              op_ == Token::MOD ? Builtins::MOD : Builtins::DIV,
-                              op_,
-                              mode_);
+      HandleBinaryOpSlowCases(
+          masm,
+          &not_smi,
+          op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
       break;
     }
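When the stub is specialized on a power-of-two right-hand side, MOD collapses to one AND: the low bits of the mask keep the tagged remainder, and OR-ing 0x80000000 into the mask preserves the sign bit, so a negative input shows up in the condition flags and is diverted to the fix-up code. A sketch of the mask arithmetic (returning false here stands in for the stub's sign fix-up):

    #include <cstdint>
    #include <cstdio>

    const int kSmiTagSize = 1;

    // Smi MOD by a power-of-two constant rhs, as a single AND.
    bool SmiModPow2(int32_t x_tagged, int32_t rhs, int32_t* result) {
      uint32_t mask = 0x80000000u | ((rhs << kSmiTagSize) - 1);
      int32_t r = x_tagged & static_cast<int32_t>(mask);
      if (r < 0) return false;  // Sign bit survived: input was negative.
      *result = r;              // Tagged remainder of a non-negative input.
      return true;
    }

    int main() {
      int32_t r;
      if (SmiModPow2(13 << kSmiTagSize, 4, &r))
        std::printf("%d\n", r >> kSmiTagSize);  // 1, since 13 % 4 == 1
      return 0;
    }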

     case Token::BIT_OR:
     case Token::BIT_AND:
     case Token::BIT_XOR:
     case Token::SAR:
     case Token::SHR:
     case Token::SHL: {
       Label slow;
(...skipping 39 matching lines...)
       __ Ret();
       __ bind(&slow);
       HandleNonSmiBitwiseOp(masm);
       break;
     }

     default: UNREACHABLE();
   }
   // This code should be unreachable.
   __ stop("Unreachable");
+
+  // Generate an unreachable reference to the DEFAULT stub so that it can be

        Mads Ager (chromium) 2010/03/22 10:17:50
        Please add the TODO to get rid of this before submitting.

+  // found at the end of this stub when clearing ICs at GC.
+  if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
+    GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
+    __ CallStub(&uninit);
+  }
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  Label get_result;
+
+  __ push(r1);
+  __ push(r0);
+
+  // Internal frame is necessary to handle exceptions properly.
+  __ EnterInternalFrame();
+  // Call the stub proper to get the result in r0.
+  __ Call(&get_result);
+  __ LeaveInternalFrame();
+
+  __ push(r0);
+
+  __ mov(r0, Operand(Smi::FromInt(MinorKey())));
+  __ push(r0);
+  __ mov(r0, Operand(Smi::FromInt(op_)));
+  __ push(r0);
+  __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
+  __ push(r0);
+
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+      6,
+      1);
+
+  // The entry point for the result calculation is assumed to be immediately
+  // after this sequence.
+  __ bind(&get_result);
 }
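GenerateTypeTransition is the IC half of the change: it re-pushes the operands, calls the stub body inside an internal frame to compute the result, then hands the result together with MinorKey, the operation, and the stub's current operand-type assumption to the kBinaryOp_Patch runtime entry, which records the types actually observed and patches the call site to a more specialized stub. A toy model of the widening such an IC performs (illustrative state names only; the real set lives in BinaryOpIC):

    #include <cstdio>

    // Illustrative operand-type lattice: start at DEFAULT (uninitialized)
    // and only ever widen as new operand kinds are observed.
    enum TypeInfo { DEFAULT, SMIS, HEAP_NUMBERS, GENERIC };

    TypeInfo Join(TypeInfo current, TypeInfo seen) {
      return current > seen ? current : seen;  // Widening, never narrowing.
    }

    int main() {
      TypeInfo state = DEFAULT;
      state = Join(state, SMIS);          // first call sees two Smis
      state = Join(state, HEAP_NUMBERS);  // later a double shows up
      std::printf("%d\n", state);         // 2: regenerate for heap numbers
      return 0;
    }

The unreachable CallStub to the DEFAULT-flavored stub at the end of Generate exists so the GC can find and reset patched stubs when clearing ICs, which is what the review comment above asks to mark with a TODO.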


 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
-  return Handle<Code>::null();
+  GenericBinaryOpStub stub(key, type_info);
+  return stub.GetCode();
 }


 void StackCheckStub::Generate(MacroAssembler* masm) {
   // Do tail-call to runtime routine.  Runtime routines expect at least one
   // argument, so give it a Smi.
   __ mov(r0, Operand(Smi::FromInt(0)));
   __ push(r0);
   __ TailCallRuntime(Runtime::kStackGuard, 1, 1);

(...skipping 1694 matching lines...)

   // Just jump to runtime to add the two strings.
   __ bind(&string_add_runtime);
   __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
 }


 #undef __

 } }  // namespace v8::internal