OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 5434 matching lines...) |
5445 __ InvokeBuiltin(native, JUMP_JS); | 5445 __ InvokeBuiltin(native, JUMP_JS); |
5446 } | 5446 } |
5447 | 5447 |
5448 | 5448 |
5449 // We fall into this code if the operands were Smis, but the result was | 5449 // We fall into this code if the operands were Smis, but the result was |
5450 // not (eg. overflow). We branch into this code (to the not_smi label) if | 5450 // not (eg. overflow). We branch into this code (to the not_smi label) if |
5451 // the operands were not both Smi. The operands are in r0 and r1. In order | 5451 // the operands were not both Smi. The operands are in r0 and r1. In order |
5452 // to call the C-implemented binary fp operation routines we need to end up | 5452 // to call the C-implemented binary fp operation routines we need to end up |
5453 // with the double precision floating point operands in r0 and r1 (for the | 5453 // with the double precision floating point operands in r0 and r1 (for the |
5454 // value in r1) and r2 and r3 (for the value in r0). | 5454 // value in r1) and r2 and r3 (for the value in r0). |
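For context on the calling convention described in the comment above: the routine eventually reached through ExternalReference::double_fp_operation(op_) is an ordinary C function on doubles, and under the soft-float ARM convention each double argument occupies a register pair, which is why the stub arranges the left value in r0/r1 and the right value in r2/r3. A minimal sketch; the function name below is illustrative, not the actual V8 symbol.

    // Sketch of the C routine shape reached via
    // ExternalReference::double_fp_operation(op_).
    extern "C" double sketch_double_fp_operation(double left, double right) {
      return left - right;  // SUB shown; the ADD/MUL/DIV/MOD routines look alike
    }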
5455 static void HandleBinaryOpSlowCases(MacroAssembler* masm, | 5455 void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm, |
5456 Label* not_smi, | 5456 Label* not_smi, |
5457 const Builtins::JavaScript& builtin, | 5457 const Builtins::JavaScript& builtin) { |
5458 Token::Value operation, | |
5459 OverwriteMode mode) { | |
5460 Label slow, slow_pop_2_first, do_the_call; | 5458 Label slow, slow_pop_2_first, do_the_call; |
5461 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; | 5459 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; |
5462 // Smi-smi case (overflow). | 5460 bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_; |
5463 // Since both are Smis there is no heap number to overwrite, so allocate. | 5461 |
5464 // The new heap number is in r5. r6 and r7 are scratch. | 5462 if (ShouldGenerateSmiCode()) { |
5465 __ AllocateHeapNumber(r5, r6, r7, &slow); | 5463 // Smi-smi case (overflow). |
5466 | 5464 // Since both are Smis there is no heap number to overwrite, so allocate. |
5467 // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, | 5465 // The new heap number is in r5. r6 and r7 are scratch. |
5468 // using registers d7 and d6 for the double values. | 5466 __ AllocateHeapNumber(r5, r6, r7, &slow); |
5469 bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && | 5467 |
5470 Token::MOD != operation; | 5468 // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, |
5471 if (use_fp_registers) { | 5469 // using registers d7 and d6 for the double values. |
5472 CpuFeatures::Scope scope(VFP3); | 5470 if (use_fp_registers) { |
5473 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); | 5471 CpuFeatures::Scope scope(VFP3); |
5474 __ vmov(s15, r7); | 5472 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); |
5475 __ vcvt_f64_s32(d7, s15); | 5473 __ vmov(s15, r7); |
5476 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); | 5474 __ vcvt_f64_s32(d7, s15); |
5477 __ vmov(s13, r7); | 5475 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); |
5478 __ vcvt_f64_s32(d6, s13); | 5476 __ vmov(s13, r7); |
5479 } else { | 5477 __ vcvt_f64_s32(d6, s13); |
5480 // Write Smi from r0 to r3 and r2 in double format. r6 is scratch. | 5478 } else { |
5481 __ mov(r7, Operand(r0)); | 5479 // Write Smi from r0 to r3 and r2 in double format. r6 is scratch. |
5482 ConvertToDoubleStub stub1(r3, r2, r7, r6); | 5480 __ mov(r7, Operand(r0)); |
5483 __ push(lr); | 5481 ConvertToDoubleStub stub1(r3, r2, r7, r6); |
5484 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 5482 __ push(lr); |
5485 // Write Smi from r1 to r1 and r0 in double format. r6 is scratch. | 5483 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); |
5486 __ mov(r7, Operand(r1)); | 5484 // Write Smi from r1 to r1 and r0 in double format. r6 is scratch. |
5487 ConvertToDoubleStub stub2(r1, r0, r7, r6); | 5485 __ mov(r7, Operand(r1)); |
5488 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 5486 ConvertToDoubleStub stub2(r1, r0, r7, r6); |
5489 __ pop(lr); | 5487 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); |
| 5488 __ pop(lr); |
| 5489 } |
| 5490 __ jmp(&do_the_call); // Tail call. No return. |
5490 } | 5491 } |
5491 | 5492 |
5492 __ jmp(&do_the_call); // Tail call. No return. | 5493 // We branch here if at least one of r0 and r1 is not a Smi. |
5493 | 5494 __ bind(not_smi); |
| 5495 |
| 5496 if (ShouldGenerateFPCode()) { |
| 5497 if (runtime_operands_type_ == BinaryOpIC::DEFAULT) { |
| 5498 switch (op_) { |
| 5499 case Token::ADD: |
| 5500 case Token::SUB: |
| 5501 case Token::MUL: |
| 5502 case Token::DIV: |
| 5503 GenerateTypeTransition(masm); |
| 5504 break; |
| 5505 |
| 5506 default: |
| 5507 break; |
| 5508 } |
| 5509 } |
| 5510 |
| 5511 if (mode_ == NO_OVERWRITE) { |
| 5512 // In the case where there is no chance of an overwritable float we may as |
| 5513 // well do the allocation immediately while r0 and r1 are untouched. |
| 5514 __ AllocateHeapNumber(r5, r6, r7, &slow); |
| 5515 } |
| 5516 |
| 5517 // Move r0 to a double in r2-r3. |
| 5518 __ tst(r0, Operand(kSmiTagMask)); |
| 5519 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. |
| 5520 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); |
| 5521 __ b(ne, &slow); |
| 5522 if (mode_ == OVERWRITE_RIGHT) { |
| 5523 __ mov(r5, Operand(r0)); // Overwrite this heap number. |
| 5524 } |
| 5525 if (use_fp_registers) { |
| 5526 CpuFeatures::Scope scope(VFP3); |
| 5527 // Load the double from tagged HeapNumber r0 to d7. |
| 5528 __ sub(r7, r0, Operand(kHeapObjectTag)); |
| 5529 __ vldr(d7, r7, HeapNumber::kValueOffset); |
| 5530 } else { |
| 5531 // Calling convention says that second double is in r2 and r3. |
| 5532 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
| 5533 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4)); |
| 5534 } |
| 5535 __ jmp(&finished_loading_r0); |
| 5536 __ bind(&r0_is_smi); |
| 5537 if (mode_ == OVERWRITE_RIGHT) { |
| 5538 // We can't overwrite a Smi so get address of new heap number into r5. |
| 5539 __ AllocateHeapNumber(r5, r6, r7, &slow); |
| 5540 } |
| 5541 |
| 5542 if (use_fp_registers) { |
| 5543 CpuFeatures::Scope scope(VFP3); |
| 5544 // Convert smi in r0 to double in d7. |
| 5545 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); |
| 5546 __ vmov(s15, r7); |
| 5547 __ vcvt_f64_s32(d7, s15); |
| 5548 } else { |
| 5549 // Write Smi from r0 to r3 and r2 in double format. |
| 5550 __ mov(r7, Operand(r0)); |
| 5551 ConvertToDoubleStub stub3(r3, r2, r7, r6); |
| 5552 __ push(lr); |
| 5553 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); |
| 5554 __ pop(lr); |
| 5555 } |
| 5556 |
| 5557 __ bind(&finished_loading_r0); |
| 5558 |
| 5559 // Move r1 to a double in r0-r1. |
| 5560 __ tst(r1, Operand(kSmiTagMask)); |
| 5561 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. |
| 5562 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); |
| 5563 __ b(ne, &slow); |
| 5564 if (mode_ == OVERWRITE_LEFT) { |
| 5565 __ mov(r5, Operand(r1)); // Overwrite this heap number. |
| 5566 } |
| 5567 if (use_fp_registers) { |
| 5568 CpuFeatures::Scope scope(VFP3); |
| 5569 // Load the double from tagged HeapNumber r1 to d6. |
| 5570 __ sub(r7, r1, Operand(kHeapObjectTag)); |
| 5571 __ vldr(d6, r7, HeapNumber::kValueOffset); |
| 5572 } else { |
| 5573 // Calling convention says that first double is in r0 and r1. |
| 5574 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); |
| 5575 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4)); |
| 5576 } |
| 5577 __ jmp(&finished_loading_r1); |
| 5578 __ bind(&r1_is_smi); |
| 5579 if (mode_ == OVERWRITE_LEFT) { |
| 5580 // We can't overwrite a Smi so get address of new heap number into r5. |
| 5581 __ AllocateHeapNumber(r5, r6, r7, &slow); |
| 5582 } |
| 5583 |
| 5584 if (use_fp_registers) { |
| 5585 CpuFeatures::Scope scope(VFP3); |
| 5586 // Convert smi in r1 to double in d6. |
| 5587 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); |
| 5588 __ vmov(s13, r7); |
| 5589 __ vcvt_f64_s32(d6, s13); |
| 5590 } else { |
| 5591 // Write Smi from r1 to r1 and r0 in double format. |
| 5592 __ mov(r7, Operand(r1)); |
| 5593 ConvertToDoubleStub stub4(r1, r0, r7, r6); |
| 5594 __ push(lr); |
| 5595 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); |
| 5596 __ pop(lr); |
| 5597 } |
| 5598 |
| 5599 __ bind(&finished_loading_r1); |
| 5600 |
| 5601 __ bind(&do_the_call); |
| 5602 // If we are inlining the operation using VFP3 instructions for |
| 5603 // add, subtract, multiply, or divide, the arguments are in d6 and d7. |
| 5604 if (use_fp_registers) { |
| 5605 CpuFeatures::Scope scope(VFP3); |
| 5606 // ARMv7 VFP3 instructions to implement |
| 5607 // double precision, add, subtract, multiply, divide. |
| 5608 |
| 5609 if (Token::MUL == op_) { |
| 5610 __ vmul(d5, d6, d7); |
| 5611 } else if (Token::DIV == op_) { |
| 5612 __ vdiv(d5, d6, d7); |
| 5613 } else if (Token::ADD == op_) { |
| 5614 __ vadd(d5, d6, d7); |
| 5615 } else if (Token::SUB == op_) { |
| 5616 __ vsub(d5, d6, d7); |
| 5617 } else { |
| 5618 UNREACHABLE(); |
| 5619 } |
| 5620 __ sub(r0, r5, Operand(kHeapObjectTag)); |
| 5621 __ vstr(d5, r0, HeapNumber::kValueOffset); |
| 5622 __ add(r0, r0, Operand(kHeapObjectTag)); |
| 5623 __ mov(pc, lr); |
| 5624 } else { |
| 5625 // If we did not inline the operation, then the arguments are in: |
| 5626 // r0: Left value (least significant part of mantissa). |
| 5627 // r1: Left value (sign, exponent, top of mantissa). |
| 5628 // r2: Right value (least significant part of mantissa). |
| 5629 // r3: Right value (sign, exponent, top of mantissa). |
| 5630 // r5: Address of heap number for result. |
| 5631 |
| 5632 __ push(lr); // For later. |
| 5633 __ push(r5); // Address of heap number that is answer. |
| 5634 __ AlignStack(0); |
| 5635 // Call C routine that may not cause GC or other trouble. |
| 5636 __ mov(r5, Operand(ExternalReference::double_fp_operation(op_))); |
| 5637 __ Call(r5); |
| 5638 __ pop(r4); // Address of heap number. |
| 5639 __ cmp(r4, Operand(Smi::FromInt(0))); |
| 5640 __ pop(r4, eq); // Conditional pop instruction |
| 5641 // to get rid of alignment push. |
| 5642 // Store answer in the overwritable heap number. |
| 5643 #if !defined(USE_ARM_EABI) |
| 5644 // Double returned in fp coprocessor register 0 and 1, encoded as register |
| 5645 // cr8. Offsets must be divisible by 4 for coprocessor so we need to |
| 5646 // subtract the tag from r4. |
| 5647 __ sub(r5, r4, Operand(kHeapObjectTag)); |
| 5648 __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset)); |
| 5649 #else |
| 5650 // Double returned in registers 0 and 1. |
| 5651 __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset)); |
| 5652 __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4)); |
| 5653 #endif |
| 5654 __ mov(r0, Operand(r4)); |
| 5655 // And we are done. |
| 5656 __ pop(pc); |
| 5657 } |
| 5658 } |
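Two addressing idioms appear in the block above: FieldMemOperand(obj, offset) folds the heap-object pointer tag into the load offset, while the VFP path strips the tag explicitly (sub r7, rX, kHeapObjectTag) because vldr/vstr want a plain base register plus offset. A minimal sketch of the equivalence, with illustrative constant values rather than V8's real definitions:

    #include <cstdint>
    static const intptr_t kSketchHeapObjectTag = 1;  // illustrative value

    // Both idioms compute the same untagged field address.
    static inline intptr_t SketchFieldAddress(intptr_t tagged_object, int offset) {
      return tagged_object + offset - kSketchHeapObjectTag;
    }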
5494 // We jump to here if something goes wrong (one param is not a number of any | 5659 // We jump to here if something goes wrong (one param is not a number of any |
5495 // sort or new-space allocation fails). | 5660 // sort or new-space allocation fails). |
5496 __ bind(&slow); | 5661 __ bind(&slow); |
5497 | 5662 |
5498 // Push arguments to the stack | 5663 // Push arguments to the stack |
5499 __ push(r1); | 5664 __ push(r1); |
5500 __ push(r0); | 5665 __ push(r0); |
5501 | 5666 |
5502 if (Token::ADD == operation) { | 5667 if (Token::ADD == op_) { |
5503 // Test for string arguments before calling runtime. | 5668 // Test for string arguments before calling runtime. |
5504 // r1 : first argument | 5669 // r1 : first argument |
5505 // r0 : second argument | 5670 // r0 : second argument |
5506 // sp[0] : second argument | 5671 // sp[0] : second argument |
5507 // sp[4] : first argument | 5672 // sp[4] : first argument |
5508 | 5673 |
5509 Label not_strings, not_string1, string1, string1_smi2; | 5674 Label not_strings, not_string1, string1, string1_smi2; |
5510 __ tst(r1, Operand(kSmiTagMask)); | 5675 __ tst(r1, Operand(kSmiTagMask)); |
5511 __ b(eq, ¬_string1); | 5676 __ b(eq, ¬_string1); |
5512 __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE); | 5677 __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE); |
(...skipping 31 matching lines...) |
5544 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); | 5709 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); |
5545 __ b(ge, ¬_strings); | 5710 __ b(ge, ¬_strings); |
5546 | 5711 |
5547 // Only second argument is a string. | 5712 // Only second argument is a string. |
5548 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS); | 5713 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS); |
5549 | 5714 |
5550 __ bind(¬_strings); | 5715 __ bind(¬_strings); |
5551 } | 5716 } |
5552 | 5717 |
5553 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return. | 5718 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return. |
5554 | |
5555 // We branch here if at least one of r0 and r1 is not a Smi. | |
5556 __ bind(not_smi); | |
5557 if (mode == NO_OVERWRITE) { | |
5558 // In the case where there is no chance of an overwritable float we may as | |
5559 // well do the allocation immediately while r0 and r1 are untouched. | |
5560 __ AllocateHeapNumber(r5, r6, r7, &slow); | |
5561 } | |
5562 | |
5563 // Move r0 to a double in r2-r3. | |
5564 __ tst(r0, Operand(kSmiTagMask)); | |
5565 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. | |
5566 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); | |
5567 __ b(ne, &slow); | |
5568 if (mode == OVERWRITE_RIGHT) { | |
5569 __ mov(r5, Operand(r0)); // Overwrite this heap number. | |
5570 } | |
5571 if (use_fp_registers) { | |
5572 CpuFeatures::Scope scope(VFP3); | |
5573 // Load the double from tagged HeapNumber r0 to d7. | |
5574 __ sub(r7, r0, Operand(kHeapObjectTag)); | |
5575 __ vldr(d7, r7, HeapNumber::kValueOffset); | |
5576 } else { | |
5577 // Calling convention says that second double is in r2 and r3. | |
5578 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset)); | |
5579 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4)); | |
5580 } | |
5581 __ jmp(&finished_loading_r0); | |
5582 __ bind(&r0_is_smi); | |
5583 if (mode == OVERWRITE_RIGHT) { | |
5584 // We can't overwrite a Smi so get address of new heap number into r5. | |
5585 __ AllocateHeapNumber(r5, r6, r7, &slow); | |
5586 } | |
5587 | |
5588 if (use_fp_registers) { | |
5589 CpuFeatures::Scope scope(VFP3); | |
5590 // Convert smi in r0 to double in d7. | |
5591 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); | |
5592 __ vmov(s15, r7); | |
5593 __ vcvt_f64_s32(d7, s15); | |
5594 } else { | |
5595 // Write Smi from r0 to r3 and r2 in double format. | |
5596 __ mov(r7, Operand(r0)); | |
5597 ConvertToDoubleStub stub3(r3, r2, r7, r6); | |
5598 __ push(lr); | |
5599 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); | |
5600 __ pop(lr); | |
5601 } | |
5602 | |
5603 __ bind(&finished_loading_r0); | |
5604 | |
5605 // Move r1 to a double in r0-r1. | |
5606 __ tst(r1, Operand(kSmiTagMask)); | |
5607 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. | |
5608 __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); | |
5609 __ b(ne, &slow); | |
5610 if (mode == OVERWRITE_LEFT) { | |
5611 __ mov(r5, Operand(r1)); // Overwrite this heap number. | |
5612 } | |
5613 if (use_fp_registers) { | |
5614 CpuFeatures::Scope scope(VFP3); | |
5615 // Load the double from tagged HeapNumber r1 to d6. | |
5616 __ sub(r7, r1, Operand(kHeapObjectTag)); | |
5617 __ vldr(d6, r7, HeapNumber::kValueOffset); | |
5618 } else { | |
5619 // Calling convention says that first double is in r0 and r1. | |
5620 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); | |
5621 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4)); | |
5622 } | |
5623 __ jmp(&finished_loading_r1); | |
5624 __ bind(&r1_is_smi); | |
5625 if (mode == OVERWRITE_LEFT) { | |
5626 // We can't overwrite a Smi so get address of new heap number into r5. | |
5627 __ AllocateHeapNumber(r5, r6, r7, &slow); | |
5628 } | |
5629 | |
5630 if (use_fp_registers) { | |
5631 CpuFeatures::Scope scope(VFP3); | |
5632 // Convert smi in r1 to double in d6. | |
5633 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); | |
5634 __ vmov(s13, r7); | |
5635 __ vcvt_f64_s32(d6, s13); | |
5636 } else { | |
5637 // Write Smi from r1 to r1 and r0 in double format. | |
5638 __ mov(r7, Operand(r1)); | |
5639 ConvertToDoubleStub stub4(r1, r0, r7, r6); | |
5640 __ push(lr); | |
5641 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); | |
5642 __ pop(lr); | |
5643 } | |
5644 | |
5645 __ bind(&finished_loading_r1); | |
5646 | |
5647 __ bind(&do_the_call); | |
5648 // If we are inlining the operation using VFP3 instructions for | |
5649 // add, subtract, multiply, or divide, the arguments are in d6 and d7. | |
5650 if (use_fp_registers) { | |
5651 CpuFeatures::Scope scope(VFP3); | |
5652 // ARMv7 VFP3 instructions to implement | |
5653 // double precision, add, subtract, multiply, divide. | |
5654 | |
5655 if (Token::MUL == operation) { | |
5656 __ vmul(d5, d6, d7); | |
5657 } else if (Token::DIV == operation) { | |
5658 __ vdiv(d5, d6, d7); | |
5659 } else if (Token::ADD == operation) { | |
5660 __ vadd(d5, d6, d7); | |
5661 } else if (Token::SUB == operation) { | |
5662 __ vsub(d5, d6, d7); | |
5663 } else { | |
5664 UNREACHABLE(); | |
5665 } | |
5666 __ sub(r0, r5, Operand(kHeapObjectTag)); | |
5667 __ vstr(d5, r0, HeapNumber::kValueOffset); | |
5668 __ add(r0, r0, Operand(kHeapObjectTag)); | |
5669 __ mov(pc, lr); | |
5670 return; | |
5671 } | |
5672 | |
5673 // If we did not inline the operation, then the arguments are in: | |
5674 // r0: Left value (least significant part of mantissa). | |
5675 // r1: Left value (sign, exponent, top of mantissa). | |
5676 // r2: Right value (least significant part of mantissa). | |
5677 // r3: Right value (sign, exponent, top of mantissa). | |
5678 // r5: Address of heap number for result. | |
5679 | |
5680 __ push(lr); // For later. | |
5681 __ push(r5); // Address of heap number that is answer. | |
5682 __ AlignStack(0); | |
5683 // Call C routine that may not cause GC or other trouble. | |
5684 __ mov(r5, Operand(ExternalReference::double_fp_operation(operation))); | |
5685 __ Call(r5); | |
5686 __ pop(r4); // Address of heap number. | |
5687 __ cmp(r4, Operand(Smi::FromInt(0))); | |
5688 __ pop(r4, eq); // Conditional pop instruction to get rid of alignment push. | |
5689 // Store answer in the overwritable heap number. | |
5690 #if !defined(USE_ARM_EABI) | |
5691 // Double returned in fp coprocessor register 0 and 1, encoded as register | |
5692 // cr8. Offsets must be divisible by 4 for coprocessor so we need to | |
5693 // subtract the tag from r4. |
5694 __ sub(r5, r4, Operand(kHeapObjectTag)); | |
5695 __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset)); | |
5696 #else | |
5697 // Double returned in registers 0 and 1. | |
5698 __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset)); | |
5699 __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4)); | |
5700 #endif | |
5701 __ mov(r0, Operand(r4)); | |
5702 // And we are done. | |
5703 __ pop(pc); | |
5704 } | 5719 } |
5705 | 5720 |
5706 | 5721 |
5707 // Tries to get a signed int32 out of a double precision floating point heap | 5722 // Tries to get a signed int32 out of a double precision floating point heap |
5708 // number. Rounds towards 0. Fastest for doubles that are in the ranges | 5723 // number. Rounds towards 0. Fastest for doubles that are in the ranges |
5709 // -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds | 5724 // -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds |
5710 // almost to the range of signed int32 values that are not Smis. Jumps to the | 5725 // almost to the range of signed int32 values that are not Smis. Jumps to the |
5711 // label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0 | 5726 // label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0 |
5712 // (excluding the endpoints). | 5727 // (excluding the endpoints). |
5713 static void GetInt32(MacroAssembler* masm, | 5728 static void GetInt32(MacroAssembler* masm, |
(...skipping 313 matching lines...) |
6027 | 6042 |
6028 | 6043 |
6029 | 6044 |
6030 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { | 6045 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { |
6031 // r1 : x | 6046 // r1 : x |
6032 // r0 : y | 6047 // r0 : y |
6033 // result : r0 | 6048 // result : r0 |
6034 | 6049 |
6035 // All ops need to know whether we are dealing with two Smis. Set up r2 to | 6050 // All ops need to know whether we are dealing with two Smis. Set up r2 to |
6036 // tell us that. | 6051 // tell us that. |
6037 __ orr(r2, r1, Operand(r0)); // r2 = x | y; | 6052 if (ShouldGenerateSmiCode()) { |
| 6053 __ orr(r2, r1, Operand(r0)); // r2 = x | y; |
| 6054 } |
6038 | 6055 |
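A note on the r2 = x | y trick above: since the Smi tag is 0 and heap-object pointers carry a set tag bit, OR-ing the two tagged operands produces a value whose tag bit is set iff at least one operand is not a Smi, so a single tst of r2 covers both. A standalone sketch, with tag values assumed to match 32-bit V8:

    #include <cstdint>
    static bool SketchBothSmis(int32_t tagged_x, int32_t tagged_y) {
      const int32_t kSketchSmiTagMask = 1;  // Smi tag is 0; heap objects set the low bit
      return ((tagged_x | tagged_y) & kSketchSmiTagMask) == 0;
    }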
6039 switch (op_) { | 6056 switch (op_) { |
6040 case Token::ADD: { | 6057 case Token::ADD: { |
6041 Label not_smi; | 6058 Label not_smi; |
6042 // Fast path. | 6059 // Fast path. |
6043 ASSERT(kSmiTag == 0); // Adjust code below. | 6060 if (ShouldGenerateSmiCode()) { |
6044 __ tst(r2, Operand(kSmiTagMask)); | 6061 ASSERT(kSmiTag == 0); // Adjust code below. |
6045 __ b(ne, ¬_smi); | 6062 __ tst(r2, Operand(kSmiTagMask)); |
6046 __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically. | 6063 __ b(ne, ¬_smi); |
6047 // Return if no overflow. | 6064 __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically. |
6048 __ Ret(vc); | 6065 // Return if no overflow. |
6049 __ sub(r0, r0, Operand(r1)); // Revert optimistic add. | 6066 __ Ret(vc); |
6050 | 6067 __ sub(r0, r0, Operand(r1)); // Revert optimistic add. |
6051 HandleBinaryOpSlowCases(masm, | 6068 } |
6052 ¬_smi, | 6069 HandleBinaryOpSlowCases(masm, ¬_smi, Builtins::ADD); |
6053 Builtins::ADD, | |
6054 Token::ADD, | |
6055 mode_); | |
6056 break; | 6070 break; |
6057 } | 6071 } |
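The ADD fast path above adds the tagged values directly (valid because both tags are zero), returns while the overflow flag is clear, and otherwise reverts the add before falling through to the slow cases. A rough equivalent using a compiler intrinsic in place of the ARM flags (GCC/Clang builtin assumed):

    #include <cstdint>
    // Returns true and stores the tagged sum when no overflow occurred,
    // mirroring 'add r0, r1, r0, SetCC' followed by 'Ret(vc)'.
    static bool SketchSmiAdd(int32_t tagged_x, int32_t tagged_y, int32_t* tagged_sum) {
      return !__builtin_add_overflow(tagged_x, tagged_y, tagged_sum);
    }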
6058 | 6072 |
6059 case Token::SUB: { | 6073 case Token::SUB: { |
6060 Label not_smi; | 6074 Label not_smi; |
6061 // Fast path. | 6075 // Fast path. |
6062 ASSERT(kSmiTag == 0); // Adjust code below. | 6076 if (ShouldGenerateSmiCode()) { |
6063 __ tst(r2, Operand(kSmiTagMask)); | 6077 ASSERT(kSmiTag == 0); // Adjust code below. |
6064 __ b(ne, ¬_smi); | 6078 __ tst(r2, Operand(kSmiTagMask)); |
6065 __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically. | 6079 __ b(ne, ¬_smi); |
6066 // Return if no overflow. | 6080 __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically. |
6067 __ Ret(vc); | 6081 // Return if no overflow. |
6068 __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract. | 6082 __ Ret(vc); |
6069 | 6083 __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract. |
6070 HandleBinaryOpSlowCases(masm, | 6084 } |
6071 ¬_smi, | 6085 HandleBinaryOpSlowCases(masm, ¬_smi, Builtins::SUB); |
6072 Builtins::SUB, | |
6073 Token::SUB, | |
6074 mode_); | |
6075 break; | 6086 break; |
6076 } | 6087 } |
6077 | 6088 |
6078 case Token::MUL: { | 6089 case Token::MUL: { |
6079 Label not_smi, slow; | 6090 Label not_smi, slow; |
6080 ASSERT(kSmiTag == 0); // adjust code below | 6091 if (ShouldGenerateSmiCode()) { |
6081 __ tst(r2, Operand(kSmiTagMask)); | 6092 ASSERT(kSmiTag == 0); // adjust code below |
6082 __ b(ne, ¬_smi); | 6093 __ tst(r2, Operand(kSmiTagMask)); |
6083 // Remove tag from one operand (but keep sign), so that result is Smi. | 6094 __ b(ne, ¬_smi); |
6084 __ mov(ip, Operand(r0, ASR, kSmiTagSize)); | 6095 // Remove tag from one operand (but keep sign), so that result is Smi. |
6085 // Do multiplication | 6096 __ mov(ip, Operand(r0, ASR, kSmiTagSize)); |
6086 __ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1. | 6097 // Do multiplication |
6087 // Go slow on overflows (overflow bit is not set). | 6098 __ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1. |
6088 __ mov(ip, Operand(r3, ASR, 31)); | 6099 // Go slow on overflows (overflow bit is not set). |
6089 __ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical | 6100 __ mov(ip, Operand(r3, ASR, 31)); |
6090 __ b(ne, &slow); | 6101 __ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical |
6091 // Go slow on zero result to handle -0. | 6102 __ b(ne, &slow); |
6092 __ tst(r3, Operand(r3)); | 6103 // Go slow on zero result to handle -0. |
6093 __ mov(r0, Operand(r3), LeaveCC, ne); | 6104 __ tst(r3, Operand(r3)); |
6094 __ Ret(ne); | 6105 __ mov(r0, Operand(r3), LeaveCC, ne); |
6095 // We need -0 if we were multiplying a negative number with 0 to get 0. | 6106 __ Ret(ne); |
6096 // We know one of them was zero. | 6107 // We need -0 if we were multiplying a negative number with 0 to get 0. |
6097 __ add(r2, r0, Operand(r1), SetCC); | 6108 // We know one of them was zero. |
6098 __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl); | 6109 __ add(r2, r0, Operand(r1), SetCC); |
6099 __ Ret(pl); // Return Smi 0 if the non-zero one was positive. | 6110 __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl); |
6100 // Slow case. We fall through here if we multiplied a negative number | 6111 __ Ret(pl); // Return Smi 0 if the non-zero one was positive. |
6101 // with 0, because that would mean we should produce -0. | 6112 // Slow case. We fall through here if we multiplied a negative number |
6102 __ bind(&slow); | 6113 // with 0, because that would mean we should produce -0. |
6103 | 6114 __ bind(&slow); |
6104 HandleBinaryOpSlowCases(masm, | 6115 } |
6105 ¬_smi, | 6116 HandleBinaryOpSlowCases(masm, ¬_smi, Builtins::MUL); |
6106 Builtins::MUL, | |
6107 Token::MUL, | |
6108 mode_); | |
6109 break; | 6117 break; |
6110 } | 6118 } |
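The overflow test after smull above relies on the identity that a signed 32x32->64 product fits in 32 bits exactly when the high word equals the sign extension of the low word; a zero result still goes slow because multiplying a negative number by zero must yield -0, which no Smi can represent. A sketch of the fits-in-32-bits check:

    #include <cstdint>
    static bool SketchProductFitsInt32(int32_t a, int32_t b) {
      int64_t full = static_cast<int64_t>(a) * static_cast<int64_t>(b);
      int32_t lo = static_cast<int32_t>(full);
      int32_t hi = static_cast<int32_t>(full >> 32);
      return hi == (lo < 0 ? -1 : 0);  // same test as 'mov ip, r3, ASR #31; cmp ip, r2'
    }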
6111 | 6119 |
6112 case Token::DIV: | 6120 case Token::DIV: |
6113 case Token::MOD: { | 6121 case Token::MOD: { |
6114 Label not_smi; | 6122 Label not_smi; |
6115 if (specialized_on_rhs_) { | 6123 if (ShouldGenerateSmiCode()) { |
6116 Label smi_is_unsuitable; | 6124 Label smi_is_unsuitable; |
6117 __ BranchOnNotSmi(r1, ¬_smi); | 6125 __ BranchOnNotSmi(r1, ¬_smi); |
6118 if (IsPowerOf2(constant_rhs_)) { | 6126 if (IsPowerOf2(constant_rhs_)) { |
6119 if (op_ == Token::MOD) { | 6127 if (op_ == Token::MOD) { |
6120 __ and_(r0, | 6128 __ and_(r0, |
6121 r1, | 6129 r1, |
6122 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)), | 6130 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)), |
6123 SetCC); | 6131 SetCC); |
6124 // We now have the answer, but if the input was negative we also | 6132 // We now have the answer, but if the input was negative we also |
6125 // have the sign bit. Our work is done if the result is | 6133 // have the sign bit. Our work is done if the result is |
(...skipping 59 matching lines...) |
6185 __ sub(r3, r1, Operand(r4, LSL, required_r4_shift), SetCC); | 6193 __ sub(r3, r1, Operand(r4, LSL, required_r4_shift), SetCC); |
6186 __ b(ne, &smi_is_unsuitable); // There was a remainder. | 6194 __ b(ne, &smi_is_unsuitable); // There was a remainder. |
6187 __ mov(r0, Operand(r2, LSL, kSmiTagSize)); | 6195 __ mov(r0, Operand(r2, LSL, kSmiTagSize)); |
6188 } else { | 6196 } else { |
6189 ASSERT(op_ == Token::MOD); | 6197 ASSERT(op_ == Token::MOD); |
6190 __ sub(r0, r1, Operand(r4, LSL, required_r4_shift)); | 6198 __ sub(r0, r1, Operand(r4, LSL, required_r4_shift)); |
6191 } | 6199 } |
6192 } | 6200 } |
6193 __ Ret(); | 6201 __ Ret(); |
6194 __ bind(&smi_is_unsuitable); | 6202 __ bind(&smi_is_unsuitable); |
6195 } else { | |
6196 __ jmp(¬_smi); | |
6197 } | 6203 } |
6198 HandleBinaryOpSlowCases(masm, | 6204 HandleBinaryOpSlowCases( |
6199 ¬_smi, | 6205 masm, |
6200 op_ == Token::MOD ? Builtins::MOD : Builtins::DIV, | 6206 ¬_smi, |
6201 op_, | 6207 op_ == Token::MOD ? Builtins::MOD : Builtins::DIV); |
6202 mode_); | |
6203 break; | 6208 break; |
6204 } | 6209 } |
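For the MOD-by-power-of-two fast path above: because a Smi stores its payload shifted left by kSmiTagSize, masking the tagged value with ((constant_rhs_ << kSmiTagSize) - 1) yields the tagged remainder directly for non-negative inputs, and the generated code also ORs the sign bit into the mask so a negative input is detected by the same instruction. A sketch of the non-negative case, assuming kSmiTagSize is 1 as on 32-bit V8:

    #include <cstdint>
    // tagged_smi must be non-negative and rhs a power of two.
    static int32_t SketchSmiModPowerOfTwo(int32_t tagged_smi, int32_t rhs) {
      const int kSketchSmiTagSize = 1;
      return tagged_smi & ((rhs << kSketchSmiTagSize) - 1);  // still a valid tagged Smi
    }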
6205 | 6210 |
6206 case Token::BIT_OR: | 6211 case Token::BIT_OR: |
6207 case Token::BIT_AND: | 6212 case Token::BIT_AND: |
6208 case Token::BIT_XOR: | 6213 case Token::BIT_XOR: |
6209 case Token::SAR: | 6214 case Token::SAR: |
6210 case Token::SHR: | 6215 case Token::SHR: |
6211 case Token::SHL: { | 6216 case Token::SHL: { |
6212 Label slow; | 6217 Label slow; |
(...skipping 39 matching lines...) |
6252 __ Ret(); | 6257 __ Ret(); |
6253 __ bind(&slow); | 6258 __ bind(&slow); |
6254 HandleNonSmiBitwiseOp(masm); | 6259 HandleNonSmiBitwiseOp(masm); |
6255 break; | 6260 break; |
6256 } | 6261 } |
6257 | 6262 |
6258 default: UNREACHABLE(); | 6263 default: UNREACHABLE(); |
6259 } | 6264 } |
6260 // This code should be unreachable. | 6265 // This code should be unreachable. |
6261 __ stop("Unreachable"); | 6266 __ stop("Unreachable"); |
| 6267 |
| 6268 // Generate an unreachable reference to the DEFAULT stub so that it can be |
| 6269 // found at the end of this stub when clearing ICs at GC. |
| 6270 // TODO(kaznacheev): Check performance impact and get rid of this. |
| 6271 if (runtime_operands_type_ != BinaryOpIC::DEFAULT) { |
| 6272 GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT); |
| 6273 __ CallStub(&uninit); |
| 6274 } |
| 6275 } |
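The ShouldGenerateSmiCode()/ShouldGenerateFPCode() predicates used throughout this stub are defined elsewhere in the stub class; presumably they gate code generation on the operand types the BinaryOpIC has recorded, roughly as in this standalone sketch (the enum and the exact conditions are assumptions, not the actual definitions):

    enum SketchBinaryOpTypeInfo { SKETCH_DEFAULT, SKETCH_HEAP_NUMBERS, SKETCH_STRINGS, SKETCH_GENERIC };

    // Skip the Smi fast path once the IC has only ever seen heap numbers or strings.
    static bool SketchShouldGenerateSmiCode(SketchBinaryOpTypeInfo t) {
      return t != SKETCH_HEAP_NUMBERS && t != SKETCH_STRINGS;
    }

    // Only string-only sites can omit the floating-point path.
    static bool SketchShouldGenerateFPCode(SketchBinaryOpTypeInfo t) {
      return t != SKETCH_STRINGS;
    }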
| 6276 |
| 6277 |
| 6278 void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
| 6279 Label get_result; |
| 6280 |
| 6281 __ push(r1); |
| 6282 __ push(r0); |
| 6283 |
| 6284 // Internal frame is necessary to handle exceptions properly. |
| 6285 __ EnterInternalFrame(); |
| 6286 // Call the stub proper to get the result in r0. |
| 6287 __ Call(&get_result); |
| 6288 __ LeaveInternalFrame(); |
| 6289 |
| 6290 __ push(r0); |
| 6291 |
| 6292 __ mov(r0, Operand(Smi::FromInt(MinorKey()))); |
| 6293 __ push(r0); |
| 6294 __ mov(r0, Operand(Smi::FromInt(op_))); |
| 6295 __ push(r0); |
| 6296 __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_))); |
| 6297 __ push(r0); |
| 6298 |
| 6299 __ TailCallExternalReference( |
| 6300 ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), |
| 6301 6, |
| 6302 1); |
| 6303 |
| 6304 // The entry point for the result calculation is assumed to be immediately |
| 6305 // after this sequence. |
| 6306 __ bind(&get_result); |
6262 } | 6307 } |
6263 | 6308 |
6264 | 6309 |
6265 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { | 6310 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { |
6266 return Handle<Code>::null(); | 6311 GenericBinaryOpStub stub(key, type_info); |
| 6312 return stub.GetCode(); |
6267 } | 6313 } |
6268 | 6314 |
6269 | 6315 |
6270 void StackCheckStub::Generate(MacroAssembler* masm) { | 6316 void StackCheckStub::Generate(MacroAssembler* masm) { |
6271 // Do tail-call to runtime routine. Runtime routines expect at least one | 6317 // Do tail-call to runtime routine. Runtime routines expect at least one |
6272 // argument, so give it a Smi. | 6318 // argument, so give it a Smi. |
6273 __ mov(r0, Operand(Smi::FromInt(0))); | 6319 __ mov(r0, Operand(Smi::FromInt(0))); |
6274 __ push(r0); | 6320 __ push(r0); |
6275 __ TailCallRuntime(Runtime::kStackGuard, 1, 1); | 6321 __ TailCallRuntime(Runtime::kStackGuard, 1, 1); |
6276 | 6322 |
(...skipping 1680 matching lines...) |
7957 | 8003 |
7958 // Just jump to runtime to add the two strings. | 8004 // Just jump to runtime to add the two strings. |
7959 __ bind(&string_add_runtime); | 8005 __ bind(&string_add_runtime); |
7960 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); | 8006 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); |
7961 } | 8007 } |
7962 | 8008 |
7963 | 8009 |
7964 #undef __ | 8010 #undef __ |
7965 | 8011 |
7966 } } // namespace v8::internal | 8012 } } // namespace v8::internal |