OLD | NEW |
---|---|
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 4666 matching lines...) | |
4677 | 4677 |
4678 // Return and remove the on-stack parameters. | 4678 // Return and remove the on-stack parameters. |
4679 __ add(sp, sp, Operand(3 * kPointerSize)); | 4679 __ add(sp, sp, Operand(3 * kPointerSize)); |
4680 __ Ret(); | 4680 __ Ret(); |
4681 | 4681 |
4682 __ bind(&slow_case); | 4682 __ bind(&slow_case); |
4683 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); | 4683 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); |
4684 } | 4684 } |
4685 | 4685 |
4686 | 4686 |
4687 // Count leading zeros in a 32-bit word. On ARMv5 and later it uses the clz | |
4688 // instruction. On pre-ARMv5 hardware this routine gives the wrong answer for 0 | |
4689 // (31 instead of 32). | |
4690 static void CountLeadingZeros( | |
4691 MacroAssembler* masm, | |
4692 Register source, | |
4693 Register scratch, | |
4694 Register zeros) { | |
4695 #ifdef CAN_USE_ARMV5_INSTRUCTIONS | |
4696 __ clz(zeros, source); // This instruction is only supported on ARMv5 and later. | |
4697 #else | |
4698 __ mov(zeros, Operand(0)); | |
4699 __ mov(scratch, source); | |
4700 // Top 16. | |
4701 __ tst(scratch, Operand(0xffff0000)); | |
4702 __ add(zeros, zeros, Operand(16), LeaveCC, eq); | |
4703 __ mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq); | |
4704 // Top 8. | |
4705 __ tst(scratch, Operand(0xff000000)); | |
4706 __ add(zeros, zeros, Operand(8), LeaveCC, eq); | |
4707 __ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq); | |
4708 // Top 4. | |
4709 __ tst(scratch, Operand(0xf0000000)); | |
4710 __ add(zeros, zeros, Operand(4), LeaveCC, eq); | |
4711 __ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq); | |
4712 // Top 2. | |
4713 __ tst(scratch, Operand(0xc0000000)); | |
4714 __ add(zeros, zeros, Operand(2), LeaveCC, eq); | |
4715 __ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq); | |
4716 // Top bit. | |
4717 __ tst(scratch, Operand(0x80000000u)); | |
4718 __ add(zeros, zeros, Operand(1), LeaveCC, eq); | |
4719 #endif | |
4720 } | |
4721 | |
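The pre-ARMv5 fallback above is a binary search over halves of the word. A minimal C++ sketch of the same algorithm (plain C++, not V8 code; like the assembly, it returns 31 rather than 32 for an input of 0):

    #include <cstdint>

    int CountLeadingZeros(uint32_t x) {
      int zeros = 0;
      if ((x & 0xFFFF0000u) == 0) { zeros += 16; x <<= 16; }  // Top 16.
      if ((x & 0xFF000000u) == 0) { zeros += 8;  x <<= 8;  }  // Top 8.
      if ((x & 0xF0000000u) == 0) { zeros += 4;  x <<= 4;  }  // Top 4.
      if ((x & 0xC0000000u) == 0) { zeros += 2;  x <<= 2;  }  // Top 2.
      if ((x & 0x80000000u) == 0) { zeros += 1; }             // Top bit.
      return zeros;
    }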
4722 | |
4723 // Takes a Smi and converts it to an IEEE 64-bit floating point value in two | 4687 // Takes a Smi and converts it to an IEEE 64-bit floating point value in two |
4724 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and | 4688 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and |
4725 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a | 4689 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a |
4726 // scratch register. Destroys the source register. No GC occurs during this | 4690 // scratch register. Destroys the source register. No GC occurs during this |
4727 // stub so you don't have to set up the frame. | 4691 // stub so you don't have to set up the frame. |
4728 class ConvertToDoubleStub : public CodeStub { | 4692 class ConvertToDoubleStub : public CodeStub { |
4729 public: | 4693 public: |
4730 ConvertToDoubleStub(Register result_reg_1, | 4694 ConvertToDoubleStub(Register result_reg_1, |
4731 Register result_reg_2, | 4695 Register result_reg_2, |
4732 Register source_reg, | 4696 Register source_reg, |
(...skipping 46 matching lines...) | |
4779 // Move sign bit from source to destination. This works because the sign bit | 4743 // Move sign bit from source to destination. This works because the sign bit |
4780 // in the exponent word of the double has the same position and polarity as | 4744 // in the exponent word of the double has the same position and polarity as |
4781 // the 2's complement sign bit in a Smi. | 4745 // the 2's complement sign bit in a Smi. |
4782 ASSERT(HeapNumber::kSignMask == 0x80000000u); | 4746 ASSERT(HeapNumber::kSignMask == 0x80000000u); |
4783 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC); | 4747 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC); |
4784 // Subtract from 0 if source was negative. | 4748 // Subtract from 0 if source was negative. |
4785 __ rsb(source_, source_, Operand(0), LeaveCC, ne); | 4749 __ rsb(source_, source_, Operand(0), LeaveCC, ne); |
4786 __ cmp(source_, Operand(1)); | 4750 __ cmp(source_, Operand(1)); |
4787 __ b(gt, &not_special); | 4751 __ b(gt, &not_special); |
4788 | 4752 |
4789 // We have -1, 0 or 1, which we treat specially. | 4753 // We have -1, 0 or 1, which we treat specially. |
Mads Ager (chromium)
2010/03/23 11:46:54
Could we clarify the comment to state that source
| |
4790 __ cmp(source_, Operand(0)); | |
4791 // For 1 or -1 we need to or in the 0 exponent (biased to 1023). | 4754 // For 1 or -1 we need to or in the 0 exponent (biased to 1023). |
4792 static const uint32_t exponent_word_for_1 = | 4755 static const uint32_t exponent_word_for_1 = |
4793 HeapNumber::kExponentBias << HeapNumber::kExponentShift; | 4756 HeapNumber::kExponentBias << HeapNumber::kExponentShift; |
4794 __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, ne); | 4757 __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq); |
4795 // 1, 0 and -1 all have 0 for the second word. | 4758 // 1, 0 and -1 all have 0 for the second word. |
4796 __ mov(mantissa, Operand(0)); | 4759 __ mov(mantissa, Operand(0)); |
4797 __ Ret(); | 4760 __ Ret(); |
4798 | 4761 |
4799 __ bind(&not_special); | 4762 __ bind(&not_special); |
4800 // Count leading zeros. Uses result2 for a scratch register on pre-ARM5. | 4763 // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5. |
4801 // Gets the wrong answer for 0, but we already checked for that case above. | 4764 // Gets the wrong answer for 0, but we already checked for that case above. |
4802 CountLeadingZeros(masm, source_, mantissa, zeros_); | 4765 __ CountLeadingZeros(source_, mantissa, zeros_); |
4803 // Compute exponent and or it into the exponent register. | 4766 // Compute exponent and or it into the exponent register. |
4804 // We use result2 as a scratch register here. | 4767 // We use mantissa as a scratch register here. |
4805 __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias)); | 4768 __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias)); |
4806 __ orr(exponent, | 4769 __ orr(exponent, |
4807 exponent, | 4770 exponent, |
4808 Operand(mantissa, LSL, HeapNumber::kExponentShift)); | 4771 Operand(mantissa, LSL, HeapNumber::kExponentShift)); |
4809 // Shift up the source chopping the top bit off. | 4772 // Shift up the source chopping the top bit off. |
4810 __ add(zeros_, zeros_, Operand(1)); | 4773 __ add(zeros_, zeros_, Operand(1)); |
4811 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0. | 4774 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0. |
4812 __ mov(source_, Operand(source_, LSL, zeros_)); | 4775 __ mov(source_, Operand(source_, LSL, zeros_)); |
4813 // Compute lower part of fraction (last 12 bits). | 4776 // Compute lower part of fraction (last 12 bits). |
4814 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord)); | 4777 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord)); |
4815 // And the top (top 20 bits). | 4778 // And the top (top 20 bits). |
4816 __ orr(exponent, | 4779 __ orr(exponent, |
4817 exponent, | 4780 exponent, |
4818 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); | 4781 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); |
4819 __ Ret(); | 4782 __ Ret(); |
4820 } | 4783 } |
4821 | 4784 |
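The normalization ConvertToDoubleStub performs can be modeled in portable C++. This is a sketch of the computation, not V8 API; __builtin_clz is the GCC/Clang intrinsic standing in for the CLZ step:

    #include <cstdint>

    void ConvertToDouble(int32_t value, uint32_t* exponent_word,
                         uint32_t* mantissa_word) {
      uint32_t sign = static_cast<uint32_t>(value) & 0x80000000u;
      uint32_t magnitude =
          sign ? -static_cast<uint32_t>(value) : static_cast<uint32_t>(value);
      if (magnitude <= 1) {  // -1, 0 and 1 are special-cased, as in the stub.
        *exponent_word = sign | (magnitude == 1 ? 1023u << 20 : 0u);
        *mantissa_word = 0;
        return;
      }
      int zeros = __builtin_clz(magnitude);
      uint32_t exponent = 31 - zeros + 1023;  // rsb against 31 + exponent bias.
      magnitude <<= zeros + 1;                // Normalize; drop the implicit 1.
      *exponent_word = sign | (exponent << 20) | (magnitude >> 12);
      *mantissa_word = magnitude << 20;
    }

For example, an input of 3 yields exponent_word 0x40080000 and mantissa_word 0, the bit pattern of 3.0.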
4822 | 4785 |
4823 // This stub can convert a signed int32 to a heap number (double). It does | |
4824 // not work for int32s that are in Smi range! No GC occurs during this stub | |
4825 // so you don't have to set up the frame. | |
4826 class WriteInt32ToHeapNumberStub : public CodeStub { | |
4827 public: | |
4828 WriteInt32ToHeapNumberStub(Register the_int, | |
4829 Register the_heap_number, | |
4830 Register scratch) | |
4831 : the_int_(the_int), | |
4832 the_heap_number_(the_heap_number), | |
4833 scratch_(scratch) { } | |
4834 | |
4835 private: | |
4836 Register the_int_; | |
4837 Register the_heap_number_; | |
4838 Register scratch_; | |
4839 | |
4840 // Minor key encoding in 16 bits. | |
4841 class ModeBits: public BitField<OverwriteMode, 0, 2> {}; | |
4842 class OpBits: public BitField<Token::Value, 2, 14> {}; | |
4843 | |
4844 Major MajorKey() { return WriteInt32ToHeapNumber; } | |
4845 int MinorKey() { | |
4846 // Encode the parameters in a unique 16 bit value. | |
4847 return the_int_.code() + | |
4848 (the_heap_number_.code() << 4) + | |
4849 (scratch_.code() << 8); | |
4850 } | |
4851 | |
4852 void Generate(MacroAssembler* masm); | |
4853 | |
4854 const char* GetName() { return "WriteInt32ToHeapNumberStub"; } | |
4855 | |
4856 #ifdef DEBUG | |
4857 void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); } | |
4858 #endif | |
4859 }; | |
4860 | |
4861 | |
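Why 0x80000000 is the only special case: every int32 outside Smi range has magnitude in [2^30, 2^31), so the normalized double always gets the same biased exponent. The one exception is INT32_MIN, whose magnitude is exactly 2^31. A sketch of the two exponent values (hypothetical constant names):

    const uint32_t kExponentBias = 1023;
    const uint32_t kNonSmiInt32Exponent = kExponentBias + 30;  // All but one.
    const uint32_t kMinInt32Exponent = kExponentBias + 31;     // 0x80000000 only.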
4862 // See comment for class. | 4786 // See comment for class. |
4863 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { | 4787 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { |
4864 Label max_negative_int; | 4788 Label max_negative_int; |
4865 // the_int_ has the answer which is a signed int32 but not a Smi. | 4789 // the_int_ has the answer which is a signed int32 but not a Smi. |
4866 // We test for the special value that has a different exponent. This test | 4790 // We test for the special value that has a different exponent. This test |
4867 // has the neat side effect of setting the flags according to the sign. | 4791 // has the neat side effect of setting the flags according to the sign. |
4868 ASSERT(HeapNumber::kSignMask == 0x80000000u); | 4792 ASSERT(HeapNumber::kSignMask == 0x80000000u); |
4869 __ cmp(the_int_, Operand(0x80000000u)); | 4793 __ cmp(the_int_, Operand(0x80000000u)); |
4870 __ b(eq, &max_negative_int); | 4794 __ b(eq, &max_negative_int); |
4871 // Set up the correct exponent in scratch_. All non-Smi int32s have the same exponent. | 4795 // Set up the correct exponent in scratch_. All non-Smi int32s have the same exponent. |
(...skipping 162 matching lines...) | |
5034 // the runtime. | 4958 // the runtime. |
5035 __ b(ne, slow); | 4959 __ b(ne, slow); |
5036 } | 4960 } |
5037 | 4961 |
5038 // Lhs (r1) is a smi, rhs (r0) is a number. | 4962 // Lhs (r1) is a smi, rhs (r0) is a number. |
5039 if (CpuFeatures::IsSupported(VFP3)) { | 4963 if (CpuFeatures::IsSupported(VFP3)) { |
5040 // Convert lhs to a double in d7. | 4964 // Convert lhs to a double in d7. |
5041 CpuFeatures::Scope scope(VFP3); | 4965 CpuFeatures::Scope scope(VFP3); |
5042 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); | 4966 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); |
5043 __ vmov(s15, r7); | 4967 __ vmov(s15, r7); |
5044 __ vcvt(d7, s15); | 4968 __ vcvt_f64_s32(d7, s15); |
5045 // Load the double from rhs, tagged HeapNumber r0, to d6. | 4969 // Load the double from rhs, tagged HeapNumber r0, to d6. |
5046 __ sub(r7, r0, Operand(kHeapObjectTag)); | 4970 __ sub(r7, r0, Operand(kHeapObjectTag)); |
5047 __ vldr(d6, r7, HeapNumber::kValueOffset); | 4971 __ vldr(d6, r7, HeapNumber::kValueOffset); |
5048 } else { | 4972 } else { |
5049 __ push(lr); | 4973 __ push(lr); |
5050 // Convert lhs to a double in r2, r3. | 4974 // Convert lhs to a double in r2, r3. |
5051 __ mov(r7, Operand(r1)); | 4975 __ mov(r7, Operand(r1)); |
5052 ConvertToDoubleStub stub1(r3, r2, r7, r6); | 4976 ConvertToDoubleStub stub1(r3, r2, r7, r6); |
5053 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 4977 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); |
5054 // Load rhs to a double in r0, r1. | 4978 // Load rhs to a double in r0, r1. |
(...skipping 22 matching lines...) | |
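The "Operand(rN, ASR, kSmiTagSize)" operands in these conversions are Smi untagging: on 32-bit V8 a Smi is the integer shifted left by one bit with a zero tag bit. A two-line C++ sketch of the encoding (assuming the value fits in 31 bits):

    inline int32_t SmiUntag(int32_t smi) { return smi >> 1; }    // ASR #1
    inline int32_t SmiTag(int32_t value) { return value << 1; }  // LSL #1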
5077 | 5001 |
5078 // Rhs (r0) is a smi, lhs (r1) is a heap number. | 5002 // Rhs (r0) is a smi, lhs (r1) is a heap number. |
5079 if (CpuFeatures::IsSupported(VFP3)) { | 5003 if (CpuFeatures::IsSupported(VFP3)) { |
5080 // Convert rhs to a double in d6. | 5004 // Convert rhs to a double in d6. |
5081 CpuFeatures::Scope scope(VFP3); | 5005 CpuFeatures::Scope scope(VFP3); |
5082 // Load the double from lhs, tagged HeapNumber r1, to d7. | 5006 // Load the double from lhs, tagged HeapNumber r1, to d7. |
5083 __ sub(r7, r1, Operand(kHeapObjectTag)); | 5007 __ sub(r7, r1, Operand(kHeapObjectTag)); |
5084 __ vldr(d7, r7, HeapNumber::kValueOffset); | 5008 __ vldr(d7, r7, HeapNumber::kValueOffset); |
5085 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); | 5009 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); |
5086 __ vmov(s13, r7); | 5010 __ vmov(s13, r7); |
5087 __ vcvt(d6, s13); | 5011 __ vcvt_f64_s32(d6, s13); |
5088 } else { | 5012 } else { |
5089 __ push(lr); | 5013 __ push(lr); |
5090 // Load lhs to a double in r2, r3. | 5014 // Load lhs to a double in r2, r3. |
5091 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); | 5015 __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); |
5092 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 5016 __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset)); |
5093 // Convert rhs to a double in r0, r1. | 5017 // Convert rhs to a double in r0, r1. |
5094 __ mov(r7, Operand(r0)); | 5018 __ mov(r7, Operand(r0)); |
5095 ConvertToDoubleStub stub2(r1, r0, r7, r6); | 5019 ConvertToDoubleStub stub2(r1, r0, r7, r6); |
5096 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 5020 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); |
5097 __ pop(lr); | 5021 __ pop(lr); |
(...skipping 315 matching lines...) | |
5413 __ mov(r0, Operand(Smi::FromInt(ncr))); | 5337 __ mov(r0, Operand(Smi::FromInt(ncr))); |
5414 __ push(r0); | 5338 __ push(r0); |
5415 } | 5339 } |
5416 | 5340 |
5417 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) | 5341 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) |
5418 // tagged as a small integer. | 5342 // tagged as a small integer. |
5419 __ InvokeBuiltin(native, JUMP_JS); | 5343 __ InvokeBuiltin(native, JUMP_JS); |
5420 } | 5344 } |
5421 | 5345 |
5422 | 5346 |
5423 // Allocates a heap number or jumps to the label if the young space is full and | |
5424 // a scavenge is needed. | |
5425 static void AllocateHeapNumber( | |
5426 MacroAssembler* masm, | |
5427 Label* need_gc, // Jump here if young space is full. | |
5428 Register result, // The tagged address of the new heap number. | |
5429 Register scratch1, // A scratch register. | |
5430 Register scratch2) { // Another scratch register. | |
5431 // Allocate an object in the heap for the heap number and tag it as a heap | |
5432 // object. | |
5433 __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize, | |
5434 result, | |
5435 scratch1, | |
5436 scratch2, | |
5437 need_gc, | |
5438 TAG_OBJECT); | |
5439 | |
5440 // Get heap number map and store it in the allocated object. | |
5441 __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex); | |
5442 __ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset)); | |
5443 } | |
5444 | |
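What the replacement MacroAssembler::AllocateHeapNumber emits is the usual new-space fast path: bump-allocate, bail out to the caller's label when full, then store the heap-number map. A toy C++ model under assumed names and sizes (the real allocator lives in V8's heap code; kSize here assumes a 32-bit heap number of a map word plus an 8-byte double):

    #include <cstdint>

    struct NewSpace { uintptr_t top; uintptr_t limit; };

    uintptr_t AllocateHeapNumber(NewSpace* space, uintptr_t heap_number_map,
                                 bool* need_gc) {
      const uintptr_t kSize = 12;          // 4-byte map word + 8-byte value.
      const uintptr_t kHeapObjectTag = 1;  // TAG_OBJECT in the stub.
      if (space->top + kSize > space->limit) {
        *need_gc = true;                   // The stub jumps to need_gc instead.
        return 0;
      }
      uintptr_t object = space->top;
      space->top += kSize;
      *reinterpret_cast<uintptr_t*>(object) = heap_number_map;  // Map at offset 0.
      return object | kHeapObjectTag;      // Return the tagged address.
    }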
5445 | |
5446 // We fall into this code if the operands were Smis, but the result was | 5347 // We fall into this code if the operands were Smis, but the result was |
5447 // not (e.g. overflow). We branch into this code (to the not_smi label) if | 5348 // not (e.g. overflow). We branch into this code (to the not_smi label) if |
5448 // the operands were not both Smi. The operands are in r0 and r1. In order | 5349 // the operands were not both Smi. The operands are in r0 and r1. In order |
5449 // to call the C-implemented binary fp operation routines we need to end up | 5350 // to call the C-implemented binary fp operation routines we need to end up |
5450 // with the double precision floating point operands in r0 and r1 (for the | 5351 // with the double precision floating point operands in r0 and r1 (for the |
5451 // value in r1) and r2 and r3 (for the value in r0). | 5352 // value in r1) and r2 and r3 (for the value in r0). |
5452 static void HandleBinaryOpSlowCases(MacroAssembler* masm, | 5353 static void HandleBinaryOpSlowCases(MacroAssembler* masm, |
5453 Label* not_smi, | 5354 Label* not_smi, |
5454 const Builtins::JavaScript& builtin, | 5355 const Builtins::JavaScript& builtin, |
5455 Token::Value operation, | 5356 Token::Value operation, |
5456 OverwriteMode mode) { | 5357 OverwriteMode mode) { |
5457 Label slow, slow_pop_2_first, do_the_call; | 5358 Label slow, slow_pop_2_first, do_the_call; |
5458 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; | 5359 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; |
5459 // Smi-smi case (overflow). | 5360 // Smi-smi case (overflow). |
5460 // Since both are Smis there is no heap number to overwrite, so allocate. | 5361 // Since both are Smis there is no heap number to overwrite, so allocate. |
5461 // The new heap number is in r5. r6 and r7 are scratch. | 5362 // The new heap number is in r5. r6 and r7 are scratch. |
5462 AllocateHeapNumber(masm, &slow, r5, r6, r7); | 5363 __ AllocateHeapNumber(r5, r6, r7, &slow); |
5463 | 5364 |
5464 // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, | 5365 // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, |
5465 // using registers d7 and d6 for the double values. | 5366 // using registers d7 and d6 for the double values. |
5466 bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && | 5367 bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && |
5467 Token::MOD != operation; | 5368 Token::MOD != operation; |
5468 if (use_fp_registers) { | 5369 if (use_fp_registers) { |
5469 CpuFeatures::Scope scope(VFP3); | 5370 CpuFeatures::Scope scope(VFP3); |
5470 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); | 5371 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); |
5471 __ vmov(s15, r7); | 5372 __ vmov(s15, r7); |
5472 __ vcvt(d7, s15); | 5373 __ vcvt_f64_s32(d7, s15); |
5473 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); | 5374 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); |
5474 __ vmov(s13, r7); | 5375 __ vmov(s13, r7); |
5475 __ vcvt(d6, s13); | 5376 __ vcvt_f64_s32(d6, s13); |
5476 } else { | 5377 } else { |
5477 // Write Smi from r0 to r3 and r2 in double format. r6 is scratch. | 5378 // Write Smi from r0 to r3 and r2 in double format. r6 is scratch. |
5478 __ mov(r7, Operand(r0)); | 5379 __ mov(r7, Operand(r0)); |
5479 ConvertToDoubleStub stub1(r3, r2, r7, r6); | 5380 ConvertToDoubleStub stub1(r3, r2, r7, r6); |
5480 __ push(lr); | 5381 __ push(lr); |
5481 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 5382 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); |
5482 // Write Smi from r1 to r1 and r0 in double format. r6 is scratch. | 5383 // Write Smi from r1 to r1 and r0 in double format. r6 is scratch. |
5483 __ mov(r7, Operand(r1)); | 5384 __ mov(r7, Operand(r1)); |
5484 ConvertToDoubleStub stub2(r1, r0, r7, r6); | 5385 ConvertToDoubleStub stub2(r1, r0, r7, r6); |
5485 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 5386 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); |
(...skipping 50 matching lines...) | |
5536 __ bind(&not_strings); | 5437 __ bind(&not_strings); |
5537 } | 5438 } |
5538 | 5439 |
5539 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return. | 5440 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return. |
5540 | 5441 |
5541 // We branch here if at least one of r0 and r1 is not a Smi. | 5442 // We branch here if at least one of r0 and r1 is not a Smi. |
5542 __ bind(not_smi); | 5443 __ bind(not_smi); |
5543 if (mode == NO_OVERWRITE) { | 5444 if (mode == NO_OVERWRITE) { |
5544 // In the case where there is no chance of an overwritable float we may as | 5445 // In the case where there is no chance of an overwritable float we may as |
5545 // well do the allocation immediately while r0 and r1 are untouched. | 5446 // well do the allocation immediately while r0 and r1 are untouched. |
5546 AllocateHeapNumber(masm, &slow, r5, r6, r7); | 5447 __ AllocateHeapNumber(r5, r6, r7, &slow); |
5547 } | 5448 } |
5548 | 5449 |
5549 // Move r0 to a double in r2-r3. | 5450 // Move r0 to a double in r2-r3. |
5550 __ tst(r0, Operand(kSmiTagMask)); | 5451 __ tst(r0, Operand(kSmiTagMask)); |
5551 __ b(eq, &r0_is_smi); // It's a Smi, so don't check whether it's a heap number. | 5452 __ b(eq, &r0_is_smi); // It's a Smi, so don't check whether it's a heap number. |
5552 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); | 5453 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); |
5553 __ b(ne, &slow); | 5454 __ b(ne, &slow); |
5554 if (mode == OVERWRITE_RIGHT) { | 5455 if (mode == OVERWRITE_RIGHT) { |
5555 __ mov(r5, Operand(r0)); // Overwrite this heap number. | 5456 __ mov(r5, Operand(r0)); // Overwrite this heap number. |
5556 } | 5457 } |
5557 if (use_fp_registers) { | 5458 if (use_fp_registers) { |
5558 CpuFeatures::Scope scope(VFP3); | 5459 CpuFeatures::Scope scope(VFP3); |
5559 // Load the double from tagged HeapNumber r0 to d7. | 5460 // Load the double from tagged HeapNumber r0 to d7. |
5560 __ sub(r7, r0, Operand(kHeapObjectTag)); | 5461 __ sub(r7, r0, Operand(kHeapObjectTag)); |
5561 __ vldr(d7, r7, HeapNumber::kValueOffset); | 5462 __ vldr(d7, r7, HeapNumber::kValueOffset); |
5562 } else { | 5463 } else { |
5563 // Calling convention says that second double is in r2 and r3. | 5464 // Calling convention says that second double is in r2 and r3. |
5564 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 5465 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
5565 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4)); | 5466 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4)); |
5566 } | 5467 } |
5567 __ jmp(&finished_loading_r0); | 5468 __ jmp(&finished_loading_r0); |
5568 __ bind(&r0_is_smi); | 5469 __ bind(&r0_is_smi); |
5569 if (mode == OVERWRITE_RIGHT) { | 5470 if (mode == OVERWRITE_RIGHT) { |
5570 // We can't overwrite a Smi so get address of new heap number into r5. | 5471 // We can't overwrite a Smi so get address of new heap number into r5. |
5571 AllocateHeapNumber(masm, &slow, r5, r6, r7); | 5472 __ AllocateHeapNumber(r5, r6, r7, &slow); |
5572 } | 5473 } |
5573 | 5474 |
5574 if (use_fp_registers) { | 5475 if (use_fp_registers) { |
5575 CpuFeatures::Scope scope(VFP3); | 5476 CpuFeatures::Scope scope(VFP3); |
5576 // Convert smi in r0 to double in d7. | 5477 // Convert smi in r0 to double in d7. |
5577 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); | 5478 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); |
5578 __ vmov(s15, r7); | 5479 __ vmov(s15, r7); |
5579 __ vcvt(d7, s15); | 5480 __ vcvt_f64_s32(d7, s15); |
5580 } else { | 5481 } else { |
5581 // Write Smi from r0 to r3 and r2 in double format. | 5482 // Write Smi from r0 to r3 and r2 in double format. |
5582 __ mov(r7, Operand(r0)); | 5483 __ mov(r7, Operand(r0)); |
5583 ConvertToDoubleStub stub3(r3, r2, r7, r6); | 5484 ConvertToDoubleStub stub3(r3, r2, r7, r6); |
5584 __ push(lr); | 5485 __ push(lr); |
5585 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); | 5486 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); |
5586 __ pop(lr); | 5487 __ pop(lr); |
5587 } | 5488 } |
5588 | 5489 |
5589 __ bind(&finished_loading_r0); | 5490 __ bind(&finished_loading_r0); |
(...skipping 13 matching lines...) | |
5603 __ vldr(d6, r7, HeapNumber::kValueOffset); | 5504 __ vldr(d6, r7, HeapNumber::kValueOffset); |
5604 } else { | 5505 } else { |
5605 // Calling convention says that first double is in r0 and r1. | 5506 // Calling convention says that first double is in r0 and r1. |
5606 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 5507 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset)); |
5607 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4)); | 5508 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4)); |
5608 } | 5509 } |
5609 __ jmp(&finished_loading_r1); | 5510 __ jmp(&finished_loading_r1); |
5610 __ bind(&r1_is_smi); | 5511 __ bind(&r1_is_smi); |
5611 if (mode == OVERWRITE_LEFT) { | 5512 if (mode == OVERWRITE_LEFT) { |
5612 // We can't overwrite a Smi so get address of new heap number into r5. | 5513 // We can't overwrite a Smi so get address of new heap number into r5. |
5613 AllocateHeapNumber(masm, &slow, r5, r6, r7); | 5514 __ AllocateHeapNumber(r5, r6, r7, &slow); |
5614 } | 5515 } |
5615 | 5516 |
5616 if (use_fp_registers) { | 5517 if (use_fp_registers) { |
5617 CpuFeatures::Scope scope(VFP3); | 5518 CpuFeatures::Scope scope(VFP3); |
5618 // Convert smi in r1 to double in d6. | 5519 // Convert smi in r1 to double in d6. |
5619 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); | 5520 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); |
5620 __ vmov(s13, r7); | 5521 __ vmov(s13, r7); |
5621 __ vcvt(d6, s13); | 5522 __ vcvt_f64_s32(d6, s13); |
5622 } else { | 5523 } else { |
5623 // Write Smi from r1 to r1 and r0 in double format. | 5524 // Write Smi from r1 to r1 and r0 in double format. |
5624 __ mov(r7, Operand(r1)); | 5525 __ mov(r7, Operand(r1)); |
5625 ConvertToDoubleStub stub4(r1, r0, r7, r6); | 5526 ConvertToDoubleStub stub4(r1, r0, r7, r6); |
5626 __ push(lr); | 5527 __ push(lr); |
5627 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); | 5528 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); |
5628 __ pop(lr); | 5529 __ pop(lr); |
5629 } | 5530 } |
5630 | 5531 |
5631 __ bind(&finished_loading_r1); | 5532 __ bind(&finished_loading_r1); |
(...skipping 106 matching lines...) | |
5738 // how much to shift down. | 5639 // how much to shift down. |
5739 __ rsb(dest, dest, Operand(30)); | 5640 __ rsb(dest, dest, Operand(30)); |
5740 } | 5641 } |
5741 __ bind(&right_exponent); | 5642 __ bind(&right_exponent); |
5742 if (CpuFeatures::IsSupported(VFP3)) { | 5643 if (CpuFeatures::IsSupported(VFP3)) { |
5743 CpuFeatures::Scope scope(VFP3); | 5644 CpuFeatures::Scope scope(VFP3); |
5744 // ARMv7 VFP3 instructions implementing double precision to integer | 5645 // ARMv7 VFP3 instructions implementing double precision to integer |
5745 // conversion using round to zero. | 5646 // conversion using round to zero. |
5746 __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset)); | 5647 __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset)); |
5747 __ vmov(d7, scratch2, scratch); | 5648 __ vmov(d7, scratch2, scratch); |
5748 __ vcvt(s15, d7); | 5649 __ vcvt_s32_f64(s15, d7); |
5749 __ vmov(dest, s15); | 5650 __ vmov(dest, s15); |
5750 } else { | 5651 } else { |
5751 // Get the top bits of the mantissa. | 5652 // Get the top bits of the mantissa. |
5752 __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); | 5653 __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); |
5753 // Put back the implicit 1. | 5654 // Put back the implicit 1. |
5754 __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); | 5655 __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); |
5755 // Shift up the mantissa bits to take up the space the exponent used to | 5656 // Shift up the mantissa bits to take up the space the exponent used to |
5756 // take. We just orred in the implicit bit so that took care of one and | 5657 // take. We just orred in the implicit bit so that took care of one and |
5757 // we want to leave the sign bit 0 so we subtract 2 bits from the shift | 5658 // we want to leave the sign bit 0 so we subtract 2 bits from the shift |
5758 // distance. | 5659 // distance. |
(...skipping 91 matching lines...) | |
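The non-VFP fallback in GetInt32 above reconstructs the integer from the sign, exponent, and mantissa words by hand; the net effect is a round-to-zero double-to-int32 conversion. A portable C++ sketch of that conversion (a sketch that assumes |d| < 2^31):

    #include <cstdint>
    #include <cstring>

    int32_t TruncateToInt32(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
      if (exponent < 0) return 0;  // |d| < 1 truncates to zero.
      // Put back the implicit 1 above the 52 stored mantissa bits.
      uint64_t mantissa = (bits & 0xFFFFFFFFFFFFFull) | (1ull << 52);
      int32_t magnitude = static_cast<int32_t>(mantissa >> (52 - exponent));
      return (bits >> 63) ? -magnitude : magnitude;
    }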
5850 break; | 5751 break; |
5851 } | 5752 } |
5852 case OVERWRITE_LEFT: { | 5753 case OVERWRITE_LEFT: { |
5853 __ tst(r1, Operand(kSmiTagMask)); | 5754 __ tst(r1, Operand(kSmiTagMask)); |
5854 __ b(eq, &have_to_allocate); | 5755 __ b(eq, &have_to_allocate); |
5855 __ mov(r5, Operand(r1)); | 5756 __ mov(r5, Operand(r1)); |
5856 break; | 5757 break; |
5857 } | 5758 } |
5858 case NO_OVERWRITE: { | 5759 case NO_OVERWRITE: { |
5859 // Get a new heap number in r5. r6 and r7 are scratch. | 5760 // Get a new heap number in r5. r6 and r7 are scratch. |
5860 AllocateHeapNumber(masm, &slow, r5, r6, r7); | 5761 __ AllocateHeapNumber(r5, r6, r7, &slow); |
5861 } | 5762 } |
5862 default: break; | 5763 default: break; |
5863 } | 5764 } |
5864 __ bind(&got_a_heap_number); | 5765 __ bind(&got_a_heap_number); |
5865 // r2: Answer as signed int32. | 5766 // r2: Answer as signed int32. |
5866 // r5: Heap number to write answer into. | 5767 // r5: Heap number to write answer into. |
5867 | 5768 |
5868 // Nothing can go wrong now, so move the heap number to r0, which is the | 5769 // Nothing can go wrong now, so move the heap number to r0, which is the |
5869 // result. | 5770 // result. |
5870 __ mov(r0, Operand(r5)); | 5771 __ mov(r0, Operand(r5)); |
5871 | 5772 |
5872 // Tail call that writes the int32 in r2 to the heap number in r0, using | 5773 // Tail call that writes the int32 in r2 to the heap number in r0, using |
5873 // r3 as scratch. r0 is preserved and returned. | 5774 // r3 as scratch. r0 is preserved and returned. |
5874 WriteInt32ToHeapNumberStub stub(r2, r0, r3); | 5775 WriteInt32ToHeapNumberStub stub(r2, r0, r3); |
5875 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 5776 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
5876 | 5777 |
5877 if (mode_ != NO_OVERWRITE) { | 5778 if (mode_ != NO_OVERWRITE) { |
5878 __ bind(&have_to_allocate); | 5779 __ bind(&have_to_allocate); |
5879 // Get a new heap number in r5. r6 and r7 are scratch. | 5780 // Get a new heap number in r5. r6 and r7 are scratch. |
5880 AllocateHeapNumber(masm, &slow, r5, r6, r7); | 5781 __ AllocateHeapNumber(r5, r6, r7, &slow); |
5881 __ jmp(&got_a_heap_number); | 5782 __ jmp(&got_a_heap_number); |
5882 } | 5783 } |
5883 | 5784 |
5884 // If all else failed then we go to the runtime system. | 5785 // If all else failed then we go to the runtime system. |
5885 __ bind(&slow); | 5786 __ bind(&slow); |
5886 __ push(r1); // restore stack | 5787 __ push(r1); // restore stack |
5887 __ push(r0); | 5788 __ push(r0); |
5888 switch (op_) { | 5789 switch (op_) { |
5889 case Token::BIT_OR: | 5790 case Token::BIT_OR: |
5890 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); | 5791 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); |
(...skipping 397 matching lines...) | |
6288 | 6189 |
6289 __ bind(&try_float); | 6190 __ bind(&try_float); |
6290 __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE); | 6191 __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE); |
6291 __ b(ne, &slow); | 6192 __ b(ne, &slow); |
6292 // r0 is a heap number. Get a new heap number in r1. | 6193 // r0 is a heap number. Get a new heap number in r1. |
6293 if (overwrite_) { | 6194 if (overwrite_) { |
6294 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 6195 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
6295 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. | 6196 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. |
6296 __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 6197 __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
6297 } else { | 6198 } else { |
6298 AllocateHeapNumber(masm, &slow, r1, r2, r3); | 6199 __ AllocateHeapNumber(r1, r2, r3, &slow); |
6299 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | 6200 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); |
6300 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 6201 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
6301 __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); | 6202 __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); |
6302 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. | 6203 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. |
6303 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); | 6204 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); |
6304 __ mov(r0, Operand(r1)); | 6205 __ mov(r0, Operand(r1)); |
6305 } | 6206 } |
6306 } else if (op_ == Token::BIT_NOT) { | 6207 } else if (op_ == Token::BIT_NOT) { |
6307 // Check if the operand is a heap number. | 6208 // Check if the operand is a heap number. |
6308 __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE); | 6209 __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE); |
6309 __ b(ne, &slow); | 6210 __ b(ne, &slow); |
6310 | 6211 |
6311 // Convert the heap number in r0 to an untagged integer in r1. | 6212 // Convert the heap number in r0 to an untagged integer in r1. |
6312 GetInt32(masm, r0, r1, r2, r3, &slow); | 6213 GetInt32(masm, r0, r1, r2, r3, &slow); |
6313 | 6214 |
6314 // Do the bitwise operation (move negated) and check if the result | 6215 // Do the bitwise operation (move negated) and check if the result |
6315 // fits in a smi. | 6216 // fits in a smi. |
6316 Label try_float; | 6217 Label try_float; |
6317 __ mvn(r1, Operand(r1)); | 6218 __ mvn(r1, Operand(r1)); |
6318 __ add(r2, r1, Operand(0x40000000), SetCC); | 6219 __ add(r2, r1, Operand(0x40000000), SetCC); |
6319 __ b(mi, &try_float); | 6220 __ b(mi, &try_float); |
6320 __ mov(r0, Operand(r1, LSL, kSmiTagSize)); | 6221 __ mov(r0, Operand(r1, LSL, kSmiTagSize)); |
6321 __ b(&done); | 6222 __ b(&done); |
6322 | 6223 |
6323 __ bind(&try_float); | 6224 __ bind(&try_float); |
6324 if (!overwrite_) { | 6225 if (!overwrite_) { |
6325 // Allocate a fresh heap number, but don't overwrite r0 until | 6226 // Allocate a fresh heap number, but don't overwrite r0 until |
6326 // we're sure we can do it without going through the slow case | 6227 // we're sure we can do it without going through the slow case |
6327 // that needs the value in r0. | 6228 // that needs the value in r0. |
6328 AllocateHeapNumber(masm, &slow, r2, r3, r4); | 6229 __ AllocateHeapNumber(r2, r3, r4, &slow); |
6329 __ mov(r0, Operand(r2)); | 6230 __ mov(r0, Operand(r2)); |
6330 } | 6231 } |
6331 | 6232 |
6332 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not | 6233 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not |
6333 // have to set up a frame. | 6234 // have to set up a frame. |
6334 WriteInt32ToHeapNumberStub stub(r1, r0, r2); | 6235 WriteInt32ToHeapNumberStub stub(r1, r0, r2); |
6335 __ push(lr); | 6236 __ push(lr); |
6336 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); | 6237 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); |
6337 __ pop(lr); | 6238 __ pop(lr); |
6338 } else { | 6239 } else { |
(...skipping 1618 matching lines...) | |
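The "add r2, r1, Operand(0x40000000), SetCC" followed by "b(mi, &try_float)" in the BIT_NOT path above is the standard Smi-range check: the addition sets the sign flag exactly when the value cannot be shifted left one bit without overflowing. A C++ sketch of the same test:

    #include <cstdint>

    // True when value fits in a 31-bit Smi, i.e. value is in [-2^30, 2^30).
    inline bool FitsInSmi(int32_t value) {
      return ((static_cast<uint32_t>(value) + 0x40000000u) & 0x80000000u) == 0;
    }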
7957 | 7858 |
7958 // Just jump to runtime to add the two strings. | 7859 // Just jump to runtime to add the two strings. |
7959 __ bind(&string_add_runtime); | 7860 __ bind(&string_add_runtime); |
7960 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); | 7861 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); |
7961 } | 7862 } |
7962 | 7863 |
7963 | 7864 |
7964 #undef __ | 7865 #undef __ |
7965 | 7866 |
7966 } } // namespace v8::internal | 7867 } } // namespace v8::internal |